diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-08 13:10:11 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-08 13:10:11 -0400 |
commit | 45d7f32c7a43cbb9592886d38190e379e2eb2226 (patch) | |
tree | ea68b67b1d2127527d856248c0485f2ed7e50088 /arch | |
parent | 53bcef60633086ad73683d01a4ef9ca678484d2d (diff) | |
parent | ab11b487402f97975f3ac1eeea09c82f4431481e (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile
* git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux-tile:
arch/tile: check kmalloc() result
arch/tile: catch up on various minor cleanups.
arch/tile: avoid erroneous error return for PTRACE_POKEUSR.
tile: set ARCH_KMALLOC_MINALIGN
tile: remove homegrown L1_CACHE_ALIGN macro
arch/tile: Miscellaneous cleanup changes.
arch/tile: Split the icache flush code off to a generic <arch> header.
arch/tile: Fix bug in support for atomic64_xx() ops.
arch/tile: Shrink the tile-opcode files considerably.
arch/tile: Add driver to enable access to the user dynamic network.
arch/tile: Enable more sophisticated IRQ model for 32-bit chips.
Move list types from <linux/list.h> to <linux/types.h>.
Add wait4() back to the set of <asm-generic/unistd.h> syscalls.
Revert adding some arch-specific signal syscalls to <linux/syscalls.h>.
arch/tile: Do not use GFP_KERNEL for dma_alloc_coherent(). Feedback from fujita.tomonori@lab.ntt.co.jp.
arch/tile: core support for Tilera 32-bit chips.
Fix up the "generic" unistd.h ABI to be more useful.
Diffstat (limited to 'arch')
203 files changed, 39222 insertions, 0 deletions
diff --git a/arch/tile/Kbuild b/arch/tile/Kbuild new file mode 100644 index 000000000000..a9b922716092 --- /dev/null +++ b/arch/tile/Kbuild | |||
@@ -0,0 +1,3 @@ | |||
1 | |||
2 | obj-y += kernel/ | ||
3 | obj-y += mm/ | ||
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig new file mode 100644 index 000000000000..1eb308cb711a --- /dev/null +++ b/arch/tile/Kconfig | |||
@@ -0,0 +1,356 @@ | |||
1 | # For a description of the syntax of this configuration file, | ||
2 | # see Documentation/kbuild/config-language.txt. | ||
3 | |||
4 | config MMU | ||
5 | def_bool y | ||
6 | |||
7 | config GENERIC_CSUM | ||
8 | def_bool y | ||
9 | |||
10 | config GENERIC_HARDIRQS | ||
11 | def_bool y | ||
12 | |||
13 | config GENERIC_HARDIRQS_NO__DO_IRQ | ||
14 | def_bool y | ||
15 | |||
16 | config GENERIC_IRQ_PROBE | ||
17 | def_bool y | ||
18 | |||
19 | config GENERIC_PENDING_IRQ | ||
20 | def_bool y | ||
21 | depends on GENERIC_HARDIRQS && SMP | ||
22 | |||
23 | config SEMAPHORE_SLEEPERS | ||
24 | def_bool y | ||
25 | |||
26 | config HAVE_ARCH_ALLOC_REMAP | ||
27 | def_bool y | ||
28 | |||
29 | config HAVE_SETUP_PER_CPU_AREA | ||
30 | def_bool y | ||
31 | |||
32 | config NEED_PER_CPU_PAGE_FIRST_CHUNK | ||
33 | def_bool y | ||
34 | |||
35 | config SYS_SUPPORTS_HUGETLBFS | ||
36 | def_bool y | ||
37 | |||
38 | config GENERIC_TIME | ||
39 | def_bool y | ||
40 | |||
41 | config GENERIC_CLOCKEVENTS | ||
42 | def_bool y | ||
43 | |||
44 | # FIXME: tilegx can implement a more efficient rwsem. | ||
45 | config RWSEM_GENERIC_SPINLOCK | ||
46 | def_bool y | ||
47 | |||
48 | # We have a very flat architecture from a migration point of view, | ||
49 | # so save boot time by presetting this (particularly useful on tile-sim). | ||
50 | config DEFAULT_MIGRATION_COST | ||
51 | int | ||
52 | default "10000000" | ||
53 | |||
54 | # We only support gcc 4.4 and above, so this should work. | ||
55 | config ARCH_SUPPORTS_OPTIMIZED_INLINING | ||
56 | def_bool y | ||
57 | |||
58 | config ARCH_PHYS_ADDR_T_64BIT | ||
59 | def_bool y | ||
60 | |||
61 | config LOCKDEP_SUPPORT | ||
62 | def_bool y | ||
63 | |||
64 | config STACKTRACE_SUPPORT | ||
65 | def_bool y | ||
66 | select STACKTRACE | ||
67 | |||
68 | # We use discontigmem for now; at some point we may want to switch | ||
69 | # to sparsemem (Tilera bug 7996). | ||
70 | config ARCH_DISCONTIGMEM_ENABLE | ||
71 | def_bool y | ||
72 | |||
73 | config ARCH_DISCONTIGMEM_DEFAULT | ||
74 | def_bool y | ||
75 | |||
76 | config TRACE_IRQFLAGS_SUPPORT | ||
77 | def_bool y | ||
78 | |||
79 | config STRICT_DEVMEM | ||
80 | def_bool y | ||
81 | |||
82 | # SMP is required for Tilera Linux. | ||
83 | config SMP | ||
84 | def_bool y | ||
85 | |||
86 | # Allow checking for compile-time determined overflow errors in | ||
87 | # copy_from_user(). There are still unprovable places in the | ||
88 | # generic code as of 2.6.34, so this option is not really compatible | ||
89 | # with -Werror, which is more useful in general. | ||
90 | config DEBUG_COPY_FROM_USER | ||
91 | def_bool n | ||
92 | |||
93 | config HVC_TILE | ||
94 | select HVC_DRIVER | ||
95 | def_bool y | ||
96 | |||
97 | config TILE | ||
98 | def_bool y | ||
99 | select GENERIC_FIND_FIRST_BIT | ||
100 | select GENERIC_FIND_NEXT_BIT | ||
101 | select USE_GENERIC_SMP_HELPERS | ||
102 | select CC_OPTIMIZE_FOR_SIZE | ||
103 | |||
104 | # FIXME: investigate whether we need/want these options. | ||
105 | # select HAVE_IOREMAP_PROT | ||
106 | # select HAVE_OPTPROBES | ||
107 | # select HAVE_REGS_AND_STACK_ACCESS_API | ||
108 | # select HAVE_HW_BREAKPOINT | ||
109 | # select PERF_EVENTS | ||
110 | # select HAVE_USER_RETURN_NOTIFIER | ||
111 | # config NO_BOOTMEM | ||
112 | # config ARCH_SUPPORTS_DEBUG_PAGEALLOC | ||
113 | # config HUGETLB_PAGE_SIZE_VARIABLE | ||
114 | |||
115 | |||
116 | mainmenu "Linux/TILE Kernel Configuration" | ||
117 | |||
118 | # Please note: TILE-Gx support is not yet finalized; this is | ||
119 | # the preliminary support. TILE-Gx drivers are only provided | ||
120 | # with the alpha or beta test versions for Tilera customers. | ||
121 | config TILEGX | ||
122 | depends on EXPERIMENTAL | ||
123 | bool "Building with TILE-Gx (64-bit) compiler and toolchain" | ||
124 | |||
125 | config 64BIT | ||
126 | depends on TILEGX | ||
127 | def_bool y | ||
128 | |||
129 | config ARCH_DEFCONFIG | ||
130 | string | ||
131 | default "arch/tile/configs/tile_defconfig" if !TILEGX | ||
132 | default "arch/tile/configs/tilegx_defconfig" if TILEGX | ||
133 | |||
134 | source "init/Kconfig" | ||
135 | |||
136 | menu "Tilera-specific configuration" | ||
137 | |||
138 | config NR_CPUS | ||
139 | int "Maximum number of tiles (2-255)" | ||
140 | range 2 255 | ||
141 | depends on SMP | ||
142 | default "64" | ||
143 | ---help--- | ||
144 | Building with 64 is the recommended value, but a slightly | ||
145 | smaller kernel memory footprint results from using a smaller | ||
146 | value on chips with fewer tiles. | ||
147 | |||
148 | source "kernel/time/Kconfig" | ||
149 | |||
150 | source "kernel/Kconfig.hz" | ||
151 | |||
152 | config KEXEC | ||
153 | bool "kexec system call" | ||
154 | ---help--- | ||
155 | kexec is a system call that implements the ability to shutdown your | ||
156 | current kernel, and to start another kernel. It is like a reboot | ||
157 | but it is independent of the system firmware. It is used | ||
158 | to implement the "mboot" Tilera booter. | ||
159 | |||
160 | The name comes from the similarity to the exec system call. | ||
161 | |||
162 | config COMPAT | ||
163 | bool "Support 32-bit TILE-Gx binaries in addition to 64-bit" | ||
164 | depends on TILEGX | ||
165 | select COMPAT_BINFMT_ELF | ||
166 | default y | ||
167 | ---help--- | ||
168 | If enabled, the kernel will support running TILE-Gx binaries | ||
169 | that were built with the -m32 option. | ||
170 | |||
171 | config SYSVIPC_COMPAT | ||
172 | def_bool y | ||
173 | depends on COMPAT && SYSVIPC | ||
174 | |||
175 | # We do not currently support disabling HIGHMEM on tile64 and tilepro. | ||
176 | config HIGHMEM | ||
177 | bool # "Support for more than 512 MB of RAM" | ||
178 | default !TILEGX | ||
179 | ---help--- | ||
180 | Linux can use the full amount of RAM in the system by | ||
181 | default. However, the address space of TILE processors is | ||
182 | only 4 Gigabytes large. That means that, if you have a large | ||
183 | amount of physical memory, not all of it can be "permanently | ||
184 | mapped" by the kernel. The physical memory that's not | ||
185 | permanently mapped is called "high memory". | ||
186 | |||
187 | If you are compiling a kernel which will never run on a | ||
188 | machine with more than 512 MB total physical RAM, answer | ||
189 | "false" here. This will result in the kernel mapping all of | ||
190 | physical memory into the top 1 GB of virtual memory space. | ||
191 | |||
192 | If unsure, say "true". | ||
193 | |||
194 | # We do not currently support disabling NUMA. | ||
195 | config NUMA | ||
196 | bool # "NUMA Memory Allocation and Scheduler Support" | ||
197 | depends on SMP && DISCONTIGMEM | ||
198 | default y | ||
199 | ---help--- | ||
200 | NUMA memory allocation is required for TILE processors | ||
201 | unless booting with memory striping enabled in the | ||
202 | hypervisor, or with only a single memory controller. | ||
203 | It is recommended that this option always be enabled. | ||
204 | |||
205 | config NODES_SHIFT | ||
206 | int "Log base 2 of the max number of memory controllers" | ||
207 | default 2 | ||
208 | depends on NEED_MULTIPLE_NODES | ||
209 | ---help--- | ||
210 | By default, 2, i.e. 2^2 == 4 DDR2 controllers. | ||
211 | In a system with more controllers, this value should be raised. | ||
212 | |||
213 | # Need 16MB areas to enable hugetlb | ||
214 | # See build-time check in arch/tile/mm/init.c. | ||
215 | config FORCE_MAX_ZONEORDER | ||
216 | int | ||
217 | default 9 | ||
218 | |||
219 | choice | ||
220 | depends on !TILEGX | ||
221 | prompt "Memory split" if EMBEDDED | ||
222 | default VMSPLIT_3G | ||
223 | ---help--- | ||
224 | Select the desired split between kernel and user memory. | ||
225 | |||
226 | If the address range available to the kernel is less than the | ||
227 | physical memory installed, the remaining memory will be available | ||
228 | as "high memory". Accessing high memory is a little more costly | ||
229 | than low memory, as it needs to be mapped into the kernel first. | ||
230 | Note that increasing the kernel address space limits the range | ||
231 | available to user programs, making the address space there | ||
232 | tighter. Selecting anything other than the default 3G/1G split | ||
233 | will also likely make your kernel incompatible with binary-only | ||
234 | kernel modules. | ||
235 | |||
236 | If you are not absolutely sure what you are doing, leave this | ||
237 | option alone! | ||
238 | |||
239 | config VMSPLIT_375G | ||
240 | bool "3.75G/0.25G user/kernel split (no kernel networking)" | ||
241 | config VMSPLIT_35G | ||
242 | bool "3.5G/0.5G user/kernel split" | ||
243 | config VMSPLIT_3G | ||
244 | bool "3G/1G user/kernel split" | ||
245 | config VMSPLIT_3G_OPT | ||
246 | bool "3G/1G user/kernel split (for full 1G low memory)" | ||
247 | config VMSPLIT_2G | ||
248 | bool "2G/2G user/kernel split" | ||
249 | config VMSPLIT_1G | ||
250 | bool "1G/3G user/kernel split" | ||
251 | endchoice | ||
252 | |||
253 | config PAGE_OFFSET | ||
254 | hex | ||
255 | default 0xF0000000 if VMSPLIT_375G | ||
256 | default 0xE0000000 if VMSPLIT_35G | ||
257 | default 0xB0000000 if VMSPLIT_3G_OPT | ||
258 | default 0x80000000 if VMSPLIT_2G | ||
259 | default 0x40000000 if VMSPLIT_1G | ||
260 | default 0xC0000000 | ||
261 | |||
262 | source "mm/Kconfig" | ||
263 | |||
264 | config CMDLINE_BOOL | ||
265 | bool "Built-in kernel command line" | ||
266 | default n | ||
267 | ---help--- | ||
268 | Allow for specifying boot arguments to the kernel at | ||
269 | build time. On some systems (e.g. embedded ones), it is | ||
270 | necessary or convenient to provide some or all of the | ||
271 | kernel boot arguments with the kernel itself (that is, | ||
272 | to not rely on the boot loader to provide them.) | ||
273 | |||
274 | To compile command line arguments into the kernel, | ||
275 | set this option to 'Y', then fill in | ||
276 | the boot arguments in CONFIG_CMDLINE. | ||
277 | |||
278 | Systems with fully functional boot loaders (e.g. mboot, or | ||
279 | if booting over PCI) should leave this option set to 'N'. | ||
280 | |||
281 | config CMDLINE | ||
282 | string "Built-in kernel command string" | ||
283 | depends on CMDLINE_BOOL | ||
284 | default "" | ||
285 | ---help--- | ||
286 | Enter arguments here that should be compiled into the kernel | ||
287 | image and used at boot time. If the boot loader provides a | ||
288 | command line at boot time, it is appended to this string to | ||
289 | form the full kernel command line, when the system boots. | ||
290 | |||
291 | However, you can use the CONFIG_CMDLINE_OVERRIDE option to | ||
292 | change this behavior. | ||
293 | |||
294 | In most cases, the command line (whether built-in or provided | ||
295 | by the boot loader) should specify the device for the root | ||
296 | file system. | ||
297 | |||
298 | config CMDLINE_OVERRIDE | ||
299 | bool "Built-in command line overrides boot loader arguments" | ||
300 | default n | ||
301 | depends on CMDLINE_BOOL | ||
302 | ---help--- | ||
303 | Set this option to 'Y' to have the kernel ignore the boot loader | ||
304 | command line, and use ONLY the built-in command line. | ||
305 | |||
306 | This is used to work around broken boot loaders. This should | ||
307 | be set to 'N' under normal conditions. | ||
308 | |||
309 | config VMALLOC_RESERVE | ||
310 | hex | ||
311 | default 0x1000000 | ||
312 | |||
313 | config HARDWALL | ||
314 | bool "Hardwall support to allow access to user dynamic network" | ||
315 | default y | ||
316 | |||
317 | endmenu # Tilera-specific configuration | ||
318 | |||
319 | menu "Bus options" | ||
320 | |||
321 | config NO_IOMEM | ||
322 | def_bool !PCI | ||
323 | |||
324 | config NO_IOPORT | ||
325 | def_bool !PCI | ||
326 | |||
327 | source "drivers/pci/Kconfig" | ||
328 | |||
329 | source "drivers/pci/hotplug/Kconfig" | ||
330 | |||
331 | endmenu | ||
332 | |||
333 | menu "Executable file formats" | ||
334 | |||
335 | # only elf supported | ||
336 | config KCORE_ELF | ||
337 | def_bool y | ||
338 | depends on PROC_FS | ||
339 | |||
340 | source "fs/Kconfig.binfmt" | ||
341 | |||
342 | endmenu | ||
343 | |||
344 | source "net/Kconfig" | ||
345 | |||
346 | source "drivers/Kconfig" | ||
347 | |||
348 | source "fs/Kconfig" | ||
349 | |||
350 | source "arch/tile/Kconfig.debug" | ||
351 | |||
352 | source "security/Kconfig" | ||
353 | |||
354 | source "crypto/Kconfig" | ||
355 | |||
356 | source "lib/Kconfig" | ||
diff --git a/arch/tile/Kconfig.debug b/arch/tile/Kconfig.debug new file mode 100644 index 000000000000..a81f0fbf7e60 --- /dev/null +++ b/arch/tile/Kconfig.debug | |||
@@ -0,0 +1,43 @@ | |||
1 | menu "Kernel hacking" | ||
2 | |||
3 | source "lib/Kconfig.debug" | ||
4 | |||
5 | config EARLY_PRINTK | ||
6 | bool "Early printk" if EMBEDDED && DEBUG_KERNEL | ||
7 | default y | ||
8 | help | ||
9 | Write kernel log output directly via the hypervisor console. | ||
10 | |||
11 | This is useful for kernel debugging when your machine crashes very | ||
12 | early before the console code is initialized. For normal operation | ||
13 | it is not recommended because it looks ugly and doesn't cooperate | ||
14 | with klogd/syslogd. You should normally say N here, | ||
15 | unless you want to debug such a crash. | ||
16 | |||
17 | config DEBUG_STACKOVERFLOW | ||
18 | bool "Check for stack overflows" | ||
19 | depends on DEBUG_KERNEL | ||
20 | help | ||
21 | This option will cause messages to be printed if free stack space | ||
22 | drops below a certain limit. | ||
23 | |||
24 | config DEBUG_STACK_USAGE | ||
25 | bool "Stack utilization instrumentation" | ||
26 | depends on DEBUG_KERNEL | ||
27 | help | ||
28 | Enables the display of the minimum amount of free stack which each | ||
29 | task has ever had available in the sysrq-T and sysrq-P debug output. | ||
30 | |||
31 | This option will slow down process creation somewhat. | ||
32 | |||
33 | config DEBUG_EXTRA_FLAGS | ||
34 | string "Additional compiler arguments when building with '-g'" | ||
35 | depends on DEBUG_INFO | ||
36 | default "" | ||
37 | help | ||
38 | Debug info can be large, and flags like | ||
39 | `-femit-struct-debug-baseonly' can reduce the kernel file | ||
40 | size and build time noticeably. Such flags are often | ||
41 | helpful if the main use of debug info is line number info. | ||
42 | |||
43 | endmenu | ||
diff --git a/arch/tile/Makefile b/arch/tile/Makefile new file mode 100644 index 000000000000..07c4318c0629 --- /dev/null +++ b/arch/tile/Makefile | |||
@@ -0,0 +1,52 @@ | |||
1 | # | ||
2 | # This file is subject to the terms and conditions of the GNU General Public | ||
3 | # License. See the file "COPYING" in the main directory of this archive | ||
4 | # for more details. | ||
5 | # | ||
6 | # This file is included by the global makefile so that you can add your own | ||
7 | # architecture-specific flags and dependencies. Remember to have actions | ||
8 | # for "archclean" and "archdep" for cleaning up and making dependencies for | ||
9 | # this architecture | ||
10 | |||
11 | ifeq ($(CROSS_COMPILE),) | ||
12 | # If building with TILERA_ROOT set (i.e. using the Tilera Multicore | ||
13 | # Development Environment) we can set CROSS_COMPILE based on that. | ||
14 | ifdef TILERA_ROOT | ||
15 | CROSS_COMPILE = $(TILERA_ROOT)/bin/tile- | ||
16 | endif | ||
17 | endif | ||
18 | |||
19 | # If we're not cross-compiling, make sure we're on the right architecture. | ||
20 | ifeq ($(CROSS_COMPILE),) | ||
21 | HOST_ARCH = $(shell uname -m) | ||
22 | ifneq ($(HOST_ARCH),$(ARCH)) | ||
23 | $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH)) | ||
24 | endif | ||
25 | endif | ||
26 | |||
27 | |||
28 | KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS) | ||
29 | |||
30 | LIBGCC_PATH := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) | ||
31 | |||
32 | # Provide the path to use for "make defconfig". | ||
33 | KBUILD_DEFCONFIG := $(ARCH)_defconfig | ||
34 | |||
35 | # Used as a file extension when useful, e.g. head_$(BITS).o | ||
36 | # Not needed for (e.g.) "$(CC) -m32" since the compiler automatically | ||
37 | # uses the right default anyway. | ||
38 | export BITS | ||
39 | ifeq ($(CONFIG_TILEGX),y) | ||
40 | BITS := 64 | ||
41 | else | ||
42 | BITS := 32 | ||
43 | endif | ||
44 | |||
45 | head-y := arch/tile/kernel/head_$(BITS).o | ||
46 | |||
47 | libs-y += arch/tile/lib/ | ||
48 | libs-y += $(LIBGCC_PATH) | ||
49 | |||
50 | |||
51 | # See arch/tile/Kbuild for content of core part of the kernel | ||
52 | core-y += arch/tile/ | ||
diff --git a/arch/tile/configs/tile_defconfig b/arch/tile/configs/tile_defconfig new file mode 100644 index 000000000000..f34c70b46c64 --- /dev/null +++ b/arch/tile/configs/tile_defconfig | |||
@@ -0,0 +1,1290 @@ | |||
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.34 | ||
4 | # Thu Jun 3 13:20:05 2010 | ||
5 | # | ||
6 | CONFIG_MMU=y | ||
7 | CONFIG_GENERIC_CSUM=y | ||
8 | CONFIG_GENERIC_HARDIRQS=y | ||
9 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | ||
10 | CONFIG_GENERIC_IRQ_PROBE=y | ||
11 | CONFIG_GENERIC_PENDING_IRQ=y | ||
12 | CONFIG_SEMAPHORE_SLEEPERS=y | ||
13 | CONFIG_HAVE_ARCH_ALLOC_REMAP=y | ||
14 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | ||
15 | CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y | ||
16 | CONFIG_SYS_SUPPORTS_HUGETLBFS=y | ||
17 | CONFIG_GENERIC_TIME=y | ||
18 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
19 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | ||
20 | CONFIG_DEFAULT_MIGRATION_COST=10000000 | ||
21 | CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y | ||
22 | CONFIG_ARCH_PHYS_ADDR_T_64BIT=y | ||
23 | CONFIG_LOCKDEP_SUPPORT=y | ||
24 | CONFIG_STACKTRACE_SUPPORT=y | ||
25 | CONFIG_ARCH_DISCONTIGMEM_ENABLE=y | ||
26 | CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y | ||
27 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
28 | CONFIG_STRICT_DEVMEM=y | ||
29 | CONFIG_SMP=y | ||
30 | CONFIG_WERROR=y | ||
31 | # CONFIG_DEBUG_COPY_FROM_USER is not set | ||
32 | CONFIG_HVC_TILE=y | ||
33 | CONFIG_TILE=y | ||
34 | # CONFIG_TILEGX is not set | ||
35 | CONFIG_ARCH_DEFCONFIG="arch/tile/configs/tile_defconfig" | ||
36 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
37 | CONFIG_CONSTRUCTORS=y | ||
38 | |||
39 | # | ||
40 | # General setup | ||
41 | # | ||
42 | CONFIG_EXPERIMENTAL=y | ||
43 | CONFIG_LOCK_KERNEL=y | ||
44 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
45 | CONFIG_LOCALVERSION="" | ||
46 | CONFIG_LOCALVERSION_AUTO=y | ||
47 | # CONFIG_SWAP is not set | ||
48 | CONFIG_SYSVIPC=y | ||
49 | CONFIG_SYSVIPC_SYSCTL=y | ||
50 | # CONFIG_POSIX_MQUEUE is not set | ||
51 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
52 | # CONFIG_TASKSTATS is not set | ||
53 | # CONFIG_AUDIT is not set | ||
54 | |||
55 | # | ||
56 | # RCU Subsystem | ||
57 | # | ||
58 | CONFIG_TREE_RCU=y | ||
59 | # CONFIG_TREE_PREEMPT_RCU is not set | ||
60 | # CONFIG_TINY_RCU is not set | ||
61 | # CONFIG_RCU_TRACE is not set | ||
62 | CONFIG_RCU_FANOUT=32 | ||
63 | # CONFIG_RCU_FANOUT_EXACT is not set | ||
64 | # CONFIG_RCU_FAST_NO_HZ is not set | ||
65 | # CONFIG_TREE_RCU_TRACE is not set | ||
66 | # CONFIG_IKCONFIG is not set | ||
67 | CONFIG_LOG_BUF_SHIFT=17 | ||
68 | # CONFIG_CGROUPS is not set | ||
69 | # CONFIG_SYSFS_DEPRECATED_V2 is not set | ||
70 | # CONFIG_RELAY is not set | ||
71 | # CONFIG_NAMESPACES is not set | ||
72 | CONFIG_BLK_DEV_INITRD=y | ||
73 | CONFIG_INITRAMFS_SOURCE="usr/contents.txt" | ||
74 | CONFIG_INITRAMFS_ROOT_UID=0 | ||
75 | CONFIG_INITRAMFS_ROOT_GID=0 | ||
76 | CONFIG_RD_GZIP=y | ||
77 | # CONFIG_RD_BZIP2 is not set | ||
78 | # CONFIG_RD_LZMA is not set | ||
79 | # CONFIG_RD_LZO is not set | ||
80 | CONFIG_INITRAMFS_COMPRESSION_NONE=y | ||
81 | # CONFIG_INITRAMFS_COMPRESSION_GZIP is not set | ||
82 | # CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set | ||
83 | # CONFIG_INITRAMFS_COMPRESSION_LZMA is not set | ||
84 | # CONFIG_INITRAMFS_COMPRESSION_LZO is not set | ||
85 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | ||
86 | CONFIG_SYSCTL=y | ||
87 | CONFIG_ANON_INODES=y | ||
88 | CONFIG_EMBEDDED=y | ||
89 | CONFIG_SYSCTL_SYSCALL=y | ||
90 | CONFIG_KALLSYMS=y | ||
91 | # CONFIG_KALLSYMS_ALL is not set | ||
92 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
93 | CONFIG_HOTPLUG=y | ||
94 | CONFIG_PRINTK=y | ||
95 | CONFIG_BUG=y | ||
96 | CONFIG_ELF_CORE=y | ||
97 | CONFIG_BASE_FULL=y | ||
98 | CONFIG_FUTEX=y | ||
99 | CONFIG_EPOLL=y | ||
100 | CONFIG_SIGNALFD=y | ||
101 | CONFIG_TIMERFD=y | ||
102 | CONFIG_EVENTFD=y | ||
103 | CONFIG_SHMEM=y | ||
104 | CONFIG_AIO=y | ||
105 | |||
106 | # | ||
107 | # Kernel Performance Events And Counters | ||
108 | # | ||
109 | CONFIG_VM_EVENT_COUNTERS=y | ||
110 | CONFIG_PCI_QUIRKS=y | ||
111 | CONFIG_SLUB_DEBUG=y | ||
112 | # CONFIG_COMPAT_BRK is not set | ||
113 | # CONFIG_SLAB is not set | ||
114 | CONFIG_SLUB=y | ||
115 | # CONFIG_SLOB is not set | ||
116 | CONFIG_PROFILING=y | ||
117 | CONFIG_OPROFILE=y | ||
118 | CONFIG_HAVE_OPROFILE=y | ||
119 | CONFIG_USE_GENERIC_SMP_HELPERS=y | ||
120 | |||
121 | # | ||
122 | # GCOV-based kernel profiling | ||
123 | # | ||
124 | # CONFIG_SLOW_WORK is not set | ||
125 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | ||
126 | CONFIG_SLABINFO=y | ||
127 | CONFIG_RT_MUTEXES=y | ||
128 | CONFIG_BASE_SMALL=0 | ||
129 | CONFIG_MODULES=y | ||
130 | # CONFIG_MODULE_FORCE_LOAD is not set | ||
131 | CONFIG_MODULE_UNLOAD=y | ||
132 | # CONFIG_MODULE_FORCE_UNLOAD is not set | ||
133 | # CONFIG_MODVERSIONS is not set | ||
134 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
135 | CONFIG_STOP_MACHINE=y | ||
136 | CONFIG_BLOCK=y | ||
137 | CONFIG_LBDAF=y | ||
138 | # CONFIG_BLK_DEV_BSG is not set | ||
139 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
140 | |||
141 | # | ||
142 | # IO Schedulers | ||
143 | # | ||
144 | CONFIG_IOSCHED_NOOP=y | ||
145 | # CONFIG_IOSCHED_DEADLINE is not set | ||
146 | # CONFIG_IOSCHED_CFQ is not set | ||
147 | # CONFIG_DEFAULT_DEADLINE is not set | ||
148 | # CONFIG_DEFAULT_CFQ is not set | ||
149 | CONFIG_DEFAULT_NOOP=y | ||
150 | CONFIG_DEFAULT_IOSCHED="noop" | ||
151 | # CONFIG_INLINE_SPIN_TRYLOCK is not set | ||
152 | # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set | ||
153 | # CONFIG_INLINE_SPIN_LOCK is not set | ||
154 | # CONFIG_INLINE_SPIN_LOCK_BH is not set | ||
155 | # CONFIG_INLINE_SPIN_LOCK_IRQ is not set | ||
156 | # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set | ||
157 | CONFIG_INLINE_SPIN_UNLOCK=y | ||
158 | # CONFIG_INLINE_SPIN_UNLOCK_BH is not set | ||
159 | CONFIG_INLINE_SPIN_UNLOCK_IRQ=y | ||
160 | # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set | ||
161 | # CONFIG_INLINE_READ_TRYLOCK is not set | ||
162 | # CONFIG_INLINE_READ_LOCK is not set | ||
163 | # CONFIG_INLINE_READ_LOCK_BH is not set | ||
164 | # CONFIG_INLINE_READ_LOCK_IRQ is not set | ||
165 | # CONFIG_INLINE_READ_LOCK_IRQSAVE is not set | ||
166 | CONFIG_INLINE_READ_UNLOCK=y | ||
167 | # CONFIG_INLINE_READ_UNLOCK_BH is not set | ||
168 | CONFIG_INLINE_READ_UNLOCK_IRQ=y | ||
169 | # CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set | ||
170 | # CONFIG_INLINE_WRITE_TRYLOCK is not set | ||
171 | # CONFIG_INLINE_WRITE_LOCK is not set | ||
172 | # CONFIG_INLINE_WRITE_LOCK_BH is not set | ||
173 | # CONFIG_INLINE_WRITE_LOCK_IRQ is not set | ||
174 | # CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set | ||
175 | CONFIG_INLINE_WRITE_UNLOCK=y | ||
176 | # CONFIG_INLINE_WRITE_UNLOCK_BH is not set | ||
177 | CONFIG_INLINE_WRITE_UNLOCK_IRQ=y | ||
178 | # CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set | ||
179 | CONFIG_MUTEX_SPIN_ON_OWNER=y | ||
180 | |||
181 | # | ||
182 | # Tilera-specific configuration | ||
183 | # | ||
184 | CONFIG_NR_CPUS=64 | ||
185 | CONFIG_HOMECACHE=y | ||
186 | CONFIG_DATAPLANE=y | ||
187 | CONFIG_TICK_ONESHOT=y | ||
188 | CONFIG_NO_HZ=y | ||
189 | CONFIG_HIGH_RES_TIMERS=y | ||
190 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
191 | CONFIG_HZ_100=y | ||
192 | # CONFIG_HZ_250 is not set | ||
193 | # CONFIG_HZ_300 is not set | ||
194 | # CONFIG_HZ_1000 is not set | ||
195 | CONFIG_HZ=100 | ||
196 | CONFIG_SCHED_HRTICK=y | ||
197 | # CONFIG_KEXEC is not set | ||
198 | CONFIG_HIGHMEM=y | ||
199 | CONFIG_NUMA=y | ||
200 | CONFIG_NODES_SHIFT=2 | ||
201 | CONFIG_FORCE_MAX_ZONEORDER=9 | ||
202 | # CONFIG_VMSPLIT_375G is not set | ||
203 | # CONFIG_VMSPLIT_35G is not set | ||
204 | CONFIG_VMSPLIT_3G=y | ||
205 | # CONFIG_VMSPLIT_3G_OPT is not set | ||
206 | # CONFIG_VMSPLIT_2G is not set | ||
207 | # CONFIG_VMSPLIT_1G is not set | ||
208 | CONFIG_PAGE_OFFSET=0xC0000000 | ||
209 | CONFIG_SELECT_MEMORY_MODEL=y | ||
210 | # CONFIG_FLATMEM_MANUAL is not set | ||
211 | CONFIG_DISCONTIGMEM_MANUAL=y | ||
212 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
213 | CONFIG_DISCONTIGMEM=y | ||
214 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
215 | CONFIG_NEED_MULTIPLE_NODES=y | ||
216 | CONFIG_PAGEFLAGS_EXTENDED=y | ||
217 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
218 | CONFIG_MIGRATION=y | ||
219 | CONFIG_PHYS_ADDR_T_64BIT=y | ||
220 | CONFIG_ZONE_DMA_FLAG=0 | ||
221 | CONFIG_BOUNCE=y | ||
222 | CONFIG_VIRT_TO_BUS=y | ||
223 | # CONFIG_KSM is not set | ||
224 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
225 | # CONFIG_CMDLINE_BOOL is not set | ||
226 | # CONFIG_FEEDBACK_COLLECT is not set | ||
227 | CONFIG_FEEDBACK_USE="" | ||
228 | # CONFIG_HUGEVMAP is not set | ||
229 | CONFIG_VMALLOC_RESERVE=0x1000000 | ||
230 | CONFIG_HARDWALL=y | ||
231 | CONFIG_MEMPROF=y | ||
232 | CONFIG_XGBE=y | ||
233 | CONFIG_NET_TILE=y | ||
234 | CONFIG_PSEUDO_NAPI=y | ||
235 | CONFIG_TILEPCI_ENDP=y | ||
236 | CONFIG_TILEPCI_HOST_SUBSET=m | ||
237 | CONFIG_TILE_IDE_GPIO=y | ||
238 | CONFIG_TILE_SOFTUART=y | ||
239 | |||
240 | # | ||
241 | # Bus options | ||
242 | # | ||
243 | CONFIG_PCI=y | ||
244 | CONFIG_PCI_DOMAINS=y | ||
245 | # CONFIG_NO_IOMEM is not set | ||
246 | # CONFIG_NO_IOPORT is not set | ||
247 | # CONFIG_ARCH_SUPPORTS_MSI is not set | ||
248 | CONFIG_PCI_DEBUG=y | ||
249 | # CONFIG_PCI_STUB is not set | ||
250 | # CONFIG_PCI_IOV is not set | ||
251 | # CONFIG_HOTPLUG_PCI is not set | ||
252 | |||
253 | # | ||
254 | # Executable file formats | ||
255 | # | ||
256 | CONFIG_KCORE_ELF=y | ||
257 | CONFIG_BINFMT_ELF=y | ||
258 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
259 | # CONFIG_HAVE_AOUT is not set | ||
260 | # CONFIG_BINFMT_MISC is not set | ||
261 | CONFIG_NET=y | ||
262 | |||
263 | # | ||
264 | # Networking options | ||
265 | # | ||
266 | CONFIG_PACKET=y | ||
267 | CONFIG_UNIX=y | ||
268 | CONFIG_XFRM=y | ||
269 | # CONFIG_XFRM_USER is not set | ||
270 | # CONFIG_XFRM_SUB_POLICY is not set | ||
271 | # CONFIG_XFRM_MIGRATE is not set | ||
272 | # CONFIG_XFRM_STATISTICS is not set | ||
273 | # CONFIG_NET_KEY is not set | ||
274 | CONFIG_INET=y | ||
275 | CONFIG_IP_MULTICAST=y | ||
276 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
277 | CONFIG_IP_FIB_HASH=y | ||
278 | # CONFIG_IP_PNP is not set | ||
279 | # CONFIG_NET_IPIP is not set | ||
280 | # CONFIG_NET_IPGRE is not set | ||
281 | # CONFIG_IP_MROUTE is not set | ||
282 | # CONFIG_ARPD is not set | ||
283 | # CONFIG_SYN_COOKIES is not set | ||
284 | # CONFIG_INET_AH is not set | ||
285 | # CONFIG_INET_ESP is not set | ||
286 | # CONFIG_INET_IPCOMP is not set | ||
287 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
288 | CONFIG_INET_TUNNEL=y | ||
289 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set | ||
290 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | ||
291 | CONFIG_INET_XFRM_MODE_BEET=y | ||
292 | # CONFIG_INET_LRO is not set | ||
293 | # CONFIG_INET_DIAG is not set | ||
294 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
295 | CONFIG_TCP_CONG_CUBIC=y | ||
296 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
297 | # CONFIG_TCP_MD5SIG is not set | ||
298 | CONFIG_IPV6=y | ||
299 | # CONFIG_IPV6_PRIVACY is not set | ||
300 | # CONFIG_IPV6_ROUTER_PREF is not set | ||
301 | # CONFIG_IPV6_OPTIMISTIC_DAD is not set | ||
302 | # CONFIG_INET6_AH is not set | ||
303 | # CONFIG_INET6_ESP is not set | ||
304 | # CONFIG_INET6_IPCOMP is not set | ||
305 | # CONFIG_IPV6_MIP6 is not set | ||
306 | # CONFIG_INET6_XFRM_TUNNEL is not set | ||
307 | # CONFIG_INET6_TUNNEL is not set | ||
308 | CONFIG_INET6_XFRM_MODE_TRANSPORT=y | ||
309 | CONFIG_INET6_XFRM_MODE_TUNNEL=y | ||
310 | CONFIG_INET6_XFRM_MODE_BEET=y | ||
311 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set | ||
312 | CONFIG_IPV6_SIT=y | ||
313 | # CONFIG_IPV6_SIT_6RD is not set | ||
314 | CONFIG_IPV6_NDISC_NODETYPE=y | ||
315 | # CONFIG_IPV6_TUNNEL is not set | ||
316 | # CONFIG_IPV6_MULTIPLE_TABLES is not set | ||
317 | # CONFIG_IPV6_MROUTE is not set | ||
318 | # CONFIG_NETWORK_SECMARK is not set | ||
319 | # CONFIG_NETFILTER is not set | ||
320 | # CONFIG_IP_DCCP is not set | ||
321 | # CONFIG_IP_SCTP is not set | ||
322 | # CONFIG_RDS is not set | ||
323 | # CONFIG_TIPC is not set | ||
324 | # CONFIG_ATM is not set | ||
325 | # CONFIG_BRIDGE is not set | ||
326 | # CONFIG_NET_DSA is not set | ||
327 | # CONFIG_VLAN_8021Q is not set | ||
328 | # CONFIG_DECNET is not set | ||
329 | # CONFIG_LLC2 is not set | ||
330 | # CONFIG_IPX is not set | ||
331 | # CONFIG_ATALK is not set | ||
332 | # CONFIG_X25 is not set | ||
333 | # CONFIG_LAPB is not set | ||
334 | # CONFIG_ECONET is not set | ||
335 | # CONFIG_WAN_ROUTER is not set | ||
336 | # CONFIG_PHONET is not set | ||
337 | # CONFIG_IEEE802154 is not set | ||
338 | # CONFIG_NET_SCHED is not set | ||
339 | # CONFIG_DCB is not set | ||
340 | |||
341 | # | ||
342 | # Network testing | ||
343 | # | ||
344 | # CONFIG_NET_PKTGEN is not set | ||
345 | # CONFIG_HAMRADIO is not set | ||
346 | # CONFIG_CAN is not set | ||
347 | # CONFIG_IRDA is not set | ||
348 | # CONFIG_BT is not set | ||
349 | # CONFIG_AF_RXRPC is not set | ||
350 | # CONFIG_WIRELESS is not set | ||
351 | # CONFIG_WIMAX is not set | ||
352 | # CONFIG_RFKILL is not set | ||
353 | # CONFIG_NET_9P is not set | ||
354 | |||
355 | # | ||
356 | # Device Drivers | ||
357 | # | ||
358 | |||
359 | # | ||
360 | # Generic Driver Options | ||
361 | # | ||
362 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
363 | # CONFIG_DEVTMPFS is not set | ||
364 | CONFIG_STANDALONE=y | ||
365 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
366 | CONFIG_FW_LOADER=y | ||
367 | CONFIG_FIRMWARE_IN_KERNEL=y | ||
368 | CONFIG_EXTRA_FIRMWARE="" | ||
369 | # CONFIG_DEBUG_DRIVER is not set | ||
370 | # CONFIG_DEBUG_DEVRES is not set | ||
371 | # CONFIG_SYS_HYPERVISOR is not set | ||
372 | # CONFIG_CONNECTOR is not set | ||
373 | # CONFIG_MTD is not set | ||
374 | # CONFIG_PARPORT is not set | ||
375 | CONFIG_BLK_DEV=y | ||
376 | # CONFIG_BLK_CPQ_DA is not set | ||
377 | # CONFIG_BLK_CPQ_CISS_DA is not set | ||
378 | # CONFIG_BLK_DEV_DAC960 is not set | ||
379 | # CONFIG_BLK_DEV_UMEM is not set | ||
380 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
381 | # CONFIG_BLK_DEV_LOOP is not set | ||
382 | |||
383 | # | ||
384 | # DRBD disabled because PROC_FS, INET or CONNECTOR not selected | ||
385 | # | ||
386 | # CONFIG_BLK_DEV_NBD is not set | ||
387 | # CONFIG_BLK_DEV_SX8 is not set | ||
388 | # CONFIG_BLK_DEV_RAM is not set | ||
389 | # CONFIG_CDROM_PKTCDVD is not set | ||
390 | # CONFIG_ATA_OVER_ETH is not set | ||
391 | # CONFIG_BLK_DEV_HD is not set | ||
392 | CONFIG_MISC_DEVICES=y | ||
393 | # CONFIG_AD525X_DPOT is not set | ||
394 | # CONFIG_PHANTOM is not set | ||
395 | # CONFIG_SGI_IOC4 is not set | ||
396 | # CONFIG_TIFM_CORE is not set | ||
397 | # CONFIG_ICS932S401 is not set | ||
398 | # CONFIG_ENCLOSURE_SERVICES is not set | ||
399 | # CONFIG_HP_ILO is not set | ||
400 | # CONFIG_ISL29003 is not set | ||
401 | # CONFIG_SENSORS_TSL2550 is not set | ||
402 | # CONFIG_DS1682 is not set | ||
403 | # CONFIG_C2PORT is not set | ||
404 | |||
405 | # | ||
406 | # EEPROM support | ||
407 | # | ||
408 | # CONFIG_EEPROM_AT24 is not set | ||
409 | # CONFIG_EEPROM_LEGACY is not set | ||
410 | # CONFIG_EEPROM_MAX6875 is not set | ||
411 | # CONFIG_EEPROM_93CX6 is not set | ||
412 | # CONFIG_CB710_CORE is not set | ||
413 | CONFIG_HAVE_IDE=y | ||
414 | CONFIG_IDE=y | ||
415 | |||
416 | # | ||
417 | # Please see Documentation/ide/ide.txt for help/info on IDE drives | ||
418 | # | ||
419 | # CONFIG_BLK_DEV_IDE_SATA is not set | ||
420 | CONFIG_IDE_GD=y | ||
421 | CONFIG_IDE_GD_ATA=y | ||
422 | # CONFIG_IDE_GD_ATAPI is not set | ||
423 | # CONFIG_BLK_DEV_IDECD is not set | ||
424 | # CONFIG_BLK_DEV_IDETAPE is not set | ||
425 | # CONFIG_IDE_TASK_IOCTL is not set | ||
426 | CONFIG_IDE_PROC_FS=y | ||
427 | |||
428 | # | ||
429 | # IDE chipset support/bugfixes | ||
430 | # | ||
431 | # CONFIG_BLK_DEV_PLATFORM is not set | ||
432 | |||
433 | # | ||
434 | # PCI IDE chipsets support | ||
435 | # | ||
436 | # CONFIG_BLK_DEV_GENERIC is not set | ||
437 | # CONFIG_BLK_DEV_OPTI621 is not set | ||
438 | # CONFIG_BLK_DEV_AEC62XX is not set | ||
439 | # CONFIG_BLK_DEV_ALI15X3 is not set | ||
440 | # CONFIG_BLK_DEV_AMD74XX is not set | ||
441 | # CONFIG_BLK_DEV_CMD64X is not set | ||
442 | # CONFIG_BLK_DEV_TRIFLEX is not set | ||
443 | # CONFIG_BLK_DEV_CS5520 is not set | ||
444 | # CONFIG_BLK_DEV_CS5530 is not set | ||
445 | # CONFIG_BLK_DEV_HPT366 is not set | ||
446 | # CONFIG_BLK_DEV_JMICRON is not set | ||
447 | # CONFIG_BLK_DEV_SC1200 is not set | ||
448 | # CONFIG_BLK_DEV_PIIX is not set | ||
449 | # CONFIG_BLK_DEV_IT8172 is not set | ||
450 | # CONFIG_BLK_DEV_IT8213 is not set | ||
451 | # CONFIG_BLK_DEV_IT821X is not set | ||
452 | # CONFIG_BLK_DEV_NS87415 is not set | ||
453 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set | ||
454 | # CONFIG_BLK_DEV_PDC202XX_NEW is not set | ||
455 | # CONFIG_BLK_DEV_SVWKS is not set | ||
456 | # CONFIG_BLK_DEV_SIIMAGE is not set | ||
457 | # CONFIG_BLK_DEV_SLC90E66 is not set | ||
458 | # CONFIG_BLK_DEV_TRM290 is not set | ||
459 | # CONFIG_BLK_DEV_VIA82CXXX is not set | ||
460 | # CONFIG_BLK_DEV_TC86C001 is not set | ||
461 | # CONFIG_BLK_DEV_IDEDMA is not set | ||
462 | |||
463 | # | ||
464 | # SCSI device support | ||
465 | # | ||
466 | CONFIG_SCSI_MOD=y | ||
467 | # CONFIG_RAID_ATTRS is not set | ||
468 | CONFIG_SCSI=y | ||
469 | CONFIG_SCSI_DMA=y | ||
470 | # CONFIG_SCSI_TGT is not set | ||
471 | # CONFIG_SCSI_NETLINK is not set | ||
472 | CONFIG_SCSI_PROC_FS=y | ||
473 | |||
474 | # | ||
475 | # SCSI support type (disk, tape, CD-ROM) | ||
476 | # | ||
477 | CONFIG_BLK_DEV_SD=y | ||
478 | # CONFIG_CHR_DEV_ST is not set | ||
479 | # CONFIG_CHR_DEV_OSST is not set | ||
480 | # CONFIG_BLK_DEV_SR is not set | ||
481 | # CONFIG_CHR_DEV_SG is not set | ||
482 | # CONFIG_CHR_DEV_SCH is not set | ||
483 | # CONFIG_SCSI_MULTI_LUN is not set | ||
484 | CONFIG_SCSI_CONSTANTS=y | ||
485 | CONFIG_SCSI_LOGGING=y | ||
486 | # CONFIG_SCSI_SCAN_ASYNC is not set | ||
487 | CONFIG_SCSI_WAIT_SCAN=m | ||
488 | |||
489 | # | ||
490 | # SCSI Transports | ||
491 | # | ||
492 | # CONFIG_SCSI_SPI_ATTRS is not set | ||
493 | # CONFIG_SCSI_FC_ATTRS is not set | ||
494 | # CONFIG_SCSI_ISCSI_ATTRS is not set | ||
495 | # CONFIG_SCSI_SAS_LIBSAS is not set | ||
496 | # CONFIG_SCSI_SRP_ATTRS is not set | ||
497 | CONFIG_SCSI_LOWLEVEL=y | ||
498 | # CONFIG_ISCSI_TCP is not set | ||
499 | # CONFIG_SCSI_BNX2_ISCSI is not set | ||
500 | # CONFIG_BE2ISCSI is not set | ||
501 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | ||
502 | # CONFIG_SCSI_HPSA is not set | ||
503 | # CONFIG_SCSI_3W_9XXX is not set | ||
504 | # CONFIG_SCSI_3W_SAS is not set | ||
505 | # CONFIG_SCSI_ACARD is not set | ||
506 | # CONFIG_SCSI_AACRAID is not set | ||
507 | # CONFIG_SCSI_AIC7XXX is not set | ||
508 | # CONFIG_SCSI_AIC7XXX_OLD is not set | ||
509 | # CONFIG_SCSI_AIC79XX is not set | ||
510 | # CONFIG_SCSI_AIC94XX is not set | ||
511 | # CONFIG_SCSI_MVSAS is not set | ||
512 | # CONFIG_SCSI_DPT_I2O is not set | ||
513 | # CONFIG_SCSI_ADVANSYS is not set | ||
514 | # CONFIG_SCSI_ARCMSR is not set | ||
515 | # CONFIG_MEGARAID_NEWGEN is not set | ||
516 | # CONFIG_MEGARAID_LEGACY is not set | ||
517 | # CONFIG_MEGARAID_SAS is not set | ||
518 | # CONFIG_SCSI_MPT2SAS is not set | ||
519 | # CONFIG_SCSI_HPTIOP is not set | ||
520 | # CONFIG_LIBFC is not set | ||
521 | # CONFIG_LIBFCOE is not set | ||
522 | # CONFIG_FCOE is not set | ||
523 | # CONFIG_SCSI_DMX3191D is not set | ||
524 | # CONFIG_SCSI_FUTURE_DOMAIN is not set | ||
525 | # CONFIG_SCSI_IPS is not set | ||
526 | # CONFIG_SCSI_INITIO is not set | ||
527 | # CONFIG_SCSI_INIA100 is not set | ||
528 | # CONFIG_SCSI_STEX is not set | ||
529 | # CONFIG_SCSI_SYM53C8XX_2 is not set | ||
530 | # CONFIG_SCSI_IPR is not set | ||
531 | # CONFIG_SCSI_QLOGIC_1280 is not set | ||
532 | # CONFIG_SCSI_QLA_FC is not set | ||
533 | # CONFIG_SCSI_QLA_ISCSI is not set | ||
534 | # CONFIG_SCSI_LPFC is not set | ||
535 | # CONFIG_SCSI_DC395x is not set | ||
536 | # CONFIG_SCSI_DC390T is not set | ||
537 | # CONFIG_SCSI_NSP32 is not set | ||
538 | # CONFIG_SCSI_DEBUG is not set | ||
539 | # CONFIG_SCSI_PMCRAID is not set | ||
540 | # CONFIG_SCSI_PM8001 is not set | ||
541 | # CONFIG_SCSI_SRP is not set | ||
542 | # CONFIG_SCSI_BFA_FC is not set | ||
543 | # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set | ||
544 | # CONFIG_SCSI_DH is not set | ||
545 | # CONFIG_SCSI_OSD_INITIATOR is not set | ||
546 | CONFIG_ATA=y | ||
547 | # CONFIG_ATA_NONSTANDARD is not set | ||
548 | CONFIG_ATA_VERBOSE_ERROR=y | ||
549 | CONFIG_SATA_PMP=y | ||
550 | # CONFIG_SATA_AHCI is not set | ||
551 | CONFIG_SATA_SIL24=y | ||
552 | CONFIG_ATA_SFF=y | ||
553 | # CONFIG_SATA_SVW is not set | ||
554 | # CONFIG_ATA_PIIX is not set | ||
555 | # CONFIG_SATA_MV is not set | ||
556 | # CONFIG_SATA_NV is not set | ||
557 | # CONFIG_PDC_ADMA is not set | ||
558 | # CONFIG_SATA_QSTOR is not set | ||
559 | # CONFIG_SATA_PROMISE is not set | ||
560 | # CONFIG_SATA_SX4 is not set | ||
561 | # CONFIG_SATA_SIL is not set | ||
562 | # CONFIG_SATA_SIS is not set | ||
563 | # CONFIG_SATA_ULI is not set | ||
564 | # CONFIG_SATA_VIA is not set | ||
565 | # CONFIG_SATA_VITESSE is not set | ||
566 | # CONFIG_SATA_INIC162X is not set | ||
567 | # CONFIG_PATA_ALI is not set | ||
568 | # CONFIG_PATA_AMD is not set | ||
569 | # CONFIG_PATA_ARTOP is not set | ||
570 | # CONFIG_PATA_ATP867X is not set | ||
571 | # CONFIG_PATA_ATIIXP is not set | ||
572 | # CONFIG_PATA_CMD640_PCI is not set | ||
573 | # CONFIG_PATA_CMD64X is not set | ||
574 | # CONFIG_PATA_CS5520 is not set | ||
575 | # CONFIG_PATA_CS5530 is not set | ||
576 | # CONFIG_PATA_CYPRESS is not set | ||
577 | # CONFIG_PATA_EFAR is not set | ||
578 | # CONFIG_ATA_GENERIC is not set | ||
579 | # CONFIG_PATA_HPT366 is not set | ||
580 | # CONFIG_PATA_HPT37X is not set | ||
581 | # CONFIG_PATA_HPT3X2N is not set | ||
582 | # CONFIG_PATA_HPT3X3 is not set | ||
583 | # CONFIG_PATA_IT821X is not set | ||
584 | # CONFIG_PATA_IT8213 is not set | ||
585 | # CONFIG_PATA_JMICRON is not set | ||
586 | # CONFIG_PATA_LEGACY is not set | ||
587 | # CONFIG_PATA_TRIFLEX is not set | ||
588 | # CONFIG_PATA_MARVELL is not set | ||
589 | # CONFIG_PATA_MPIIX is not set | ||
590 | # CONFIG_PATA_OLDPIIX is not set | ||
591 | # CONFIG_PATA_NETCELL is not set | ||
592 | # CONFIG_PATA_NINJA32 is not set | ||
593 | # CONFIG_PATA_NS87410 is not set | ||
594 | # CONFIG_PATA_NS87415 is not set | ||
595 | # CONFIG_PATA_OPTI is not set | ||
596 | # CONFIG_PATA_OPTIDMA is not set | ||
597 | # CONFIG_PATA_PDC2027X is not set | ||
598 | # CONFIG_PATA_PDC_OLD is not set | ||
599 | # CONFIG_PATA_RADISYS is not set | ||
600 | # CONFIG_PATA_RDC is not set | ||
601 | # CONFIG_PATA_RZ1000 is not set | ||
602 | # CONFIG_PATA_SC1200 is not set | ||
603 | # CONFIG_PATA_SERVERWORKS is not set | ||
604 | # CONFIG_PATA_SIL680 is not set | ||
605 | # CONFIG_PATA_SIS is not set | ||
606 | # CONFIG_PATA_TOSHIBA is not set | ||
607 | # CONFIG_PATA_VIA is not set | ||
608 | # CONFIG_PATA_WINBOND is not set | ||
609 | # CONFIG_PATA_PLATFORM is not set | ||
610 | # CONFIG_PATA_SCH is not set | ||
611 | # CONFIG_MD is not set | ||
612 | # CONFIG_FUSION is not set | ||
613 | |||
614 | # | ||
615 | # IEEE 1394 (FireWire) support | ||
616 | # | ||
617 | |||
618 | # | ||
619 | # You can enable one or both FireWire driver stacks. | ||
620 | # | ||
621 | |||
622 | # | ||
623 | # The newer stack is recommended. | ||
624 | # | ||
625 | # CONFIG_FIREWIRE is not set | ||
626 | # CONFIG_IEEE1394 is not set | ||
627 | # CONFIG_I2O is not set | ||
628 | CONFIG_NETDEVICES=y | ||
629 | # CONFIG_DUMMY is not set | ||
630 | # CONFIG_BONDING is not set | ||
631 | # CONFIG_MACVLAN is not set | ||
632 | # CONFIG_EQUALIZER is not set | ||
633 | CONFIG_TUN=y | ||
634 | # CONFIG_VETH is not set | ||
635 | # CONFIG_ARCNET is not set | ||
636 | # CONFIG_NET_ETHERNET is not set | ||
637 | CONFIG_NETDEV_1000=y | ||
638 | # CONFIG_ACENIC is not set | ||
639 | # CONFIG_DL2K is not set | ||
640 | # CONFIG_E1000 is not set | ||
641 | CONFIG_E1000E=y | ||
642 | # CONFIG_IP1000 is not set | ||
643 | # CONFIG_IGB is not set | ||
644 | # CONFIG_IGBVF is not set | ||
645 | # CONFIG_NS83820 is not set | ||
646 | # CONFIG_HAMACHI is not set | ||
647 | # CONFIG_YELLOWFIN is not set | ||
648 | # CONFIG_R8169 is not set | ||
649 | # CONFIG_SIS190 is not set | ||
650 | # CONFIG_SKGE is not set | ||
651 | # CONFIG_SKY2 is not set | ||
652 | # CONFIG_VIA_VELOCITY is not set | ||
653 | # CONFIG_TIGON3 is not set | ||
654 | # CONFIG_BNX2 is not set | ||
655 | # CONFIG_CNIC is not set | ||
656 | # CONFIG_QLA3XXX is not set | ||
657 | # CONFIG_ATL1 is not set | ||
658 | # CONFIG_ATL1E is not set | ||
659 | # CONFIG_ATL1C is not set | ||
660 | # CONFIG_JME is not set | ||
661 | # CONFIG_NETDEV_10000 is not set | ||
662 | # CONFIG_TR is not set | ||
663 | # CONFIG_WLAN is not set | ||
664 | |||
665 | # | ||
666 | # Enable WiMAX (Networking options) to see the WiMAX drivers | ||
667 | # | ||
668 | # CONFIG_WAN is not set | ||
669 | # CONFIG_FDDI is not set | ||
670 | # CONFIG_HIPPI is not set | ||
671 | # CONFIG_PPP is not set | ||
672 | # CONFIG_SLIP is not set | ||
673 | # CONFIG_NET_FC is not set | ||
674 | # CONFIG_NETCONSOLE is not set | ||
675 | # CONFIG_NETPOLL is not set | ||
676 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
677 | # CONFIG_VMXNET3 is not set | ||
678 | # CONFIG_ISDN is not set | ||
679 | # CONFIG_PHONE is not set | ||
680 | |||
681 | # | ||
682 | # Input device support | ||
683 | # | ||
684 | CONFIG_INPUT=y | ||
685 | # CONFIG_INPUT_FF_MEMLESS is not set | ||
686 | # CONFIG_INPUT_POLLDEV is not set | ||
687 | # CONFIG_INPUT_SPARSEKMAP is not set | ||
688 | |||
689 | # | ||
690 | # Userland interfaces | ||
691 | # | ||
692 | # CONFIG_INPUT_MOUSEDEV is not set | ||
693 | # CONFIG_INPUT_JOYDEV is not set | ||
694 | # CONFIG_INPUT_EVDEV is not set | ||
695 | # CONFIG_INPUT_EVBUG is not set | ||
696 | |||
697 | # | ||
698 | # Input Device Drivers | ||
699 | # | ||
700 | # CONFIG_INPUT_KEYBOARD is not set | ||
701 | # CONFIG_INPUT_MOUSE is not set | ||
702 | # CONFIG_INPUT_JOYSTICK is not set | ||
703 | # CONFIG_INPUT_TABLET is not set | ||
704 | # CONFIG_INPUT_TOUCHSCREEN is not set | ||
705 | # CONFIG_INPUT_MISC is not set | ||
706 | |||
707 | # | ||
708 | # Hardware I/O ports | ||
709 | # | ||
710 | # CONFIG_SERIO is not set | ||
711 | # CONFIG_GAMEPORT is not set | ||
712 | |||
713 | # | ||
714 | # Character devices | ||
715 | # | ||
716 | # CONFIG_VT is not set | ||
717 | CONFIG_DEVKMEM=y | ||
718 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
719 | # CONFIG_NOZOMI is not set | ||
720 | |||
721 | # | ||
722 | # Serial drivers | ||
723 | # | ||
724 | # CONFIG_SERIAL_8250 is not set | ||
725 | |||
726 | # | ||
727 | # Non-8250 serial port support | ||
728 | # | ||
729 | # CONFIG_SERIAL_JSM is not set | ||
730 | # CONFIG_SERIAL_TIMBERDALE is not set | ||
731 | CONFIG_UNIX98_PTYS=y | ||
732 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | ||
733 | # CONFIG_LEGACY_PTYS is not set | ||
734 | CONFIG_HVC_DRIVER=y | ||
735 | # CONFIG_IPMI_HANDLER is not set | ||
736 | # CONFIG_HW_RANDOM is not set | ||
737 | # CONFIG_R3964 is not set | ||
738 | # CONFIG_APPLICOM is not set | ||
739 | |||
740 | # | ||
741 | # PCMCIA character devices | ||
742 | # | ||
743 | # CONFIG_RAW_DRIVER is not set | ||
744 | # CONFIG_TCG_TPM is not set | ||
745 | CONFIG_DEVPORT=y | ||
746 | CONFIG_I2C=y | ||
747 | CONFIG_I2C_BOARDINFO=y | ||
748 | CONFIG_I2C_COMPAT=y | ||
749 | CONFIG_I2C_CHARDEV=y | ||
750 | CONFIG_I2C_HELPER_AUTO=y | ||
751 | |||
752 | # | ||
753 | # I2C Hardware Bus support | ||
754 | # | ||
755 | |||
756 | # | ||
757 | # PC SMBus host controller drivers | ||
758 | # | ||
759 | # CONFIG_I2C_ALI1535 is not set | ||
760 | # CONFIG_I2C_ALI1563 is not set | ||
761 | # CONFIG_I2C_ALI15X3 is not set | ||
762 | # CONFIG_I2C_AMD756 is not set | ||
763 | # CONFIG_I2C_AMD8111 is not set | ||
764 | # CONFIG_I2C_I801 is not set | ||
765 | # CONFIG_I2C_ISCH is not set | ||
766 | # CONFIG_I2C_PIIX4 is not set | ||
767 | # CONFIG_I2C_NFORCE2 is not set | ||
768 | # CONFIG_I2C_SIS5595 is not set | ||
769 | # CONFIG_I2C_SIS630 is not set | ||
770 | # CONFIG_I2C_SIS96X is not set | ||
771 | # CONFIG_I2C_VIA is not set | ||
772 | # CONFIG_I2C_VIAPRO is not set | ||
773 | |||
774 | # | ||
775 | # I2C system bus drivers (mostly embedded / system-on-chip) | ||
776 | # | ||
777 | # CONFIG_I2C_OCORES is not set | ||
778 | # CONFIG_I2C_SIMTEC is not set | ||
779 | # CONFIG_I2C_XILINX is not set | ||
780 | |||
781 | # | ||
782 | # External I2C/SMBus adapter drivers | ||
783 | # | ||
784 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
785 | # CONFIG_I2C_TAOS_EVM is not set | ||
786 | |||
787 | # | ||
788 | # Other I2C/SMBus bus drivers | ||
789 | # | ||
790 | # CONFIG_I2C_PCA_PLATFORM is not set | ||
791 | # CONFIG_I2C_STUB is not set | ||
792 | # CONFIG_I2C_DEBUG_CORE is not set | ||
793 | # CONFIG_I2C_DEBUG_ALGO is not set | ||
794 | # CONFIG_I2C_DEBUG_BUS is not set | ||
795 | # CONFIG_SPI is not set | ||
796 | |||
797 | # | ||
798 | # PPS support | ||
799 | # | ||
800 | # CONFIG_PPS is not set | ||
801 | # CONFIG_W1 is not set | ||
802 | # CONFIG_POWER_SUPPLY is not set | ||
803 | # CONFIG_HWMON is not set | ||
804 | # CONFIG_THERMAL is not set | ||
805 | CONFIG_WATCHDOG=y | ||
806 | CONFIG_WATCHDOG_NOWAYOUT=y | ||
807 | |||
808 | # | ||
809 | # Watchdog Device Drivers | ||
810 | # | ||
811 | # CONFIG_SOFT_WATCHDOG is not set | ||
812 | # CONFIG_ALIM7101_WDT is not set | ||
813 | |||
814 | # | ||
815 | # PCI-based Watchdog Cards | ||
816 | # | ||
817 | # CONFIG_PCIPCWATCHDOG is not set | ||
818 | # CONFIG_WDTPCI is not set | ||
819 | CONFIG_SSB_POSSIBLE=y | ||
820 | |||
821 | # | ||
822 | # Sonics Silicon Backplane | ||
823 | # | ||
824 | # CONFIG_SSB is not set | ||
825 | |||
826 | # | ||
827 | # Multifunction device drivers | ||
828 | # | ||
829 | # CONFIG_MFD_CORE is not set | ||
830 | # CONFIG_MFD_88PM860X is not set | ||
831 | # CONFIG_MFD_SM501 is not set | ||
832 | # CONFIG_HTC_PASIC3 is not set | ||
833 | # CONFIG_TWL4030_CORE is not set | ||
834 | # CONFIG_MFD_TMIO is not set | ||
835 | # CONFIG_PMIC_DA903X is not set | ||
836 | # CONFIG_PMIC_ADP5520 is not set | ||
837 | # CONFIG_MFD_MAX8925 is not set | ||
838 | # CONFIG_MFD_WM8400 is not set | ||
839 | # CONFIG_MFD_WM831X is not set | ||
840 | # CONFIG_MFD_WM8350_I2C is not set | ||
841 | # CONFIG_MFD_WM8994 is not set | ||
842 | # CONFIG_MFD_PCF50633 is not set | ||
843 | # CONFIG_AB3100_CORE is not set | ||
844 | # CONFIG_LPC_SCH is not set | ||
845 | # CONFIG_REGULATOR is not set | ||
846 | # CONFIG_MEDIA_SUPPORT is not set | ||
847 | |||
848 | # | ||
849 | # Graphics support | ||
850 | # | ||
851 | # CONFIG_VGA_ARB is not set | ||
852 | # CONFIG_DRM is not set | ||
853 | # CONFIG_VGASTATE is not set | ||
854 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | ||
855 | # CONFIG_FB is not set | ||
856 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
857 | |||
858 | # | ||
859 | # Display device support | ||
860 | # | ||
861 | # CONFIG_DISPLAY_SUPPORT is not set | ||
862 | # CONFIG_SOUND is not set | ||
863 | # CONFIG_HID_SUPPORT is not set | ||
864 | # CONFIG_USB_SUPPORT is not set | ||
865 | # CONFIG_UWB is not set | ||
866 | # CONFIG_MMC is not set | ||
867 | # CONFIG_MEMSTICK is not set | ||
868 | # CONFIG_NEW_LEDS is not set | ||
869 | # CONFIG_ACCESSIBILITY is not set | ||
870 | # CONFIG_INFINIBAND is not set | ||
871 | CONFIG_RTC_LIB=y | ||
872 | CONFIG_RTC_CLASS=y | ||
873 | CONFIG_RTC_HCTOSYS=y | ||
874 | CONFIG_RTC_HCTOSYS_DEVICE="rtc0" | ||
875 | # CONFIG_RTC_DEBUG is not set | ||
876 | |||
877 | # | ||
878 | # RTC interfaces | ||
879 | # | ||
880 | # CONFIG_RTC_INTF_SYSFS is not set | ||
881 | # CONFIG_RTC_INTF_PROC is not set | ||
882 | CONFIG_RTC_INTF_DEV=y | ||
883 | # CONFIG_RTC_INTF_DEV_UIE_EMUL is not set | ||
884 | # CONFIG_RTC_DRV_TEST is not set | ||
885 | |||
886 | # | ||
887 | # I2C RTC drivers | ||
888 | # | ||
889 | # CONFIG_RTC_DRV_DS1307 is not set | ||
890 | # CONFIG_RTC_DRV_DS1374 is not set | ||
891 | # CONFIG_RTC_DRV_DS1672 is not set | ||
892 | # CONFIG_RTC_DRV_MAX6900 is not set | ||
893 | # CONFIG_RTC_DRV_RS5C372 is not set | ||
894 | # CONFIG_RTC_DRV_ISL1208 is not set | ||
895 | # CONFIG_RTC_DRV_X1205 is not set | ||
896 | # CONFIG_RTC_DRV_PCF8563 is not set | ||
897 | # CONFIG_RTC_DRV_PCF8583 is not set | ||
898 | # CONFIG_RTC_DRV_M41T80 is not set | ||
899 | # CONFIG_RTC_DRV_BQ32K is not set | ||
900 | # CONFIG_RTC_DRV_S35390A is not set | ||
901 | # CONFIG_RTC_DRV_FM3130 is not set | ||
902 | # CONFIG_RTC_DRV_RX8581 is not set | ||
903 | # CONFIG_RTC_DRV_RX8025 is not set | ||
904 | |||
905 | # | ||
906 | # SPI RTC drivers | ||
907 | # | ||
908 | |||
909 | # | ||
910 | # Platform RTC drivers | ||
911 | # | ||
912 | # CONFIG_RTC_DRV_DS1286 is not set | ||
913 | # CONFIG_RTC_DRV_DS1511 is not set | ||
914 | # CONFIG_RTC_DRV_DS1553 is not set | ||
915 | # CONFIG_RTC_DRV_DS1742 is not set | ||
916 | # CONFIG_RTC_DRV_STK17TA8 is not set | ||
917 | # CONFIG_RTC_DRV_M48T86 is not set | ||
918 | # CONFIG_RTC_DRV_M48T35 is not set | ||
919 | # CONFIG_RTC_DRV_M48T59 is not set | ||
920 | # CONFIG_RTC_DRV_MSM6242 is not set | ||
921 | # CONFIG_RTC_DRV_BQ4802 is not set | ||
922 | # CONFIG_RTC_DRV_RP5C01 is not set | ||
923 | # CONFIG_RTC_DRV_V3020 is not set | ||
924 | |||
925 | # | ||
926 | # on-CPU RTC drivers | ||
927 | # | ||
928 | # CONFIG_DMADEVICES is not set | ||
929 | # CONFIG_AUXDISPLAY is not set | ||
930 | # CONFIG_UIO is not set | ||
931 | |||
932 | # | ||
933 | # TI VLYNQ | ||
934 | # | ||
935 | # CONFIG_STAGING is not set | ||
936 | |||
937 | # | ||
938 | # File systems | ||
939 | # | ||
940 | CONFIG_EXT2_FS=y | ||
941 | # CONFIG_EXT2_FS_XATTR is not set | ||
942 | # CONFIG_EXT2_FS_XIP is not set | ||
943 | CONFIG_EXT3_FS=y | ||
944 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
945 | CONFIG_EXT3_FS_XATTR=y | ||
946 | # CONFIG_EXT3_FS_POSIX_ACL is not set | ||
947 | # CONFIG_EXT3_FS_SECURITY is not set | ||
948 | # CONFIG_EXT4_FS is not set | ||
949 | CONFIG_JBD=y | ||
950 | CONFIG_FS_MBCACHE=y | ||
951 | # CONFIG_REISERFS_FS is not set | ||
952 | # CONFIG_JFS_FS is not set | ||
953 | # CONFIG_FS_POSIX_ACL is not set | ||
954 | # CONFIG_XFS_FS is not set | ||
955 | # CONFIG_GFS2_FS is not set | ||
956 | # CONFIG_OCFS2_FS is not set | ||
957 | # CONFIG_BTRFS_FS is not set | ||
958 | # CONFIG_NILFS2_FS is not set | ||
959 | CONFIG_FILE_LOCKING=y | ||
960 | CONFIG_FSNOTIFY=y | ||
961 | CONFIG_DNOTIFY=y | ||
962 | # CONFIG_INOTIFY is not set | ||
963 | CONFIG_INOTIFY_USER=y | ||
964 | # CONFIG_QUOTA is not set | ||
965 | # CONFIG_AUTOFS_FS is not set | ||
966 | # CONFIG_AUTOFS4_FS is not set | ||
967 | CONFIG_FUSE_FS=y | ||
968 | # CONFIG_CUSE is not set | ||
969 | |||
970 | # | ||
971 | # Caches | ||
972 | # | ||
973 | # CONFIG_FSCACHE is not set | ||
974 | |||
975 | # | ||
976 | # CD-ROM/DVD Filesystems | ||
977 | # | ||
978 | # CONFIG_ISO9660_FS is not set | ||
979 | # CONFIG_UDF_FS is not set | ||
980 | |||
981 | # | ||
982 | # DOS/FAT/NT Filesystems | ||
983 | # | ||
984 | CONFIG_FAT_FS=y | ||
985 | CONFIG_MSDOS_FS=y | ||
986 | CONFIG_VFAT_FS=m | ||
987 | CONFIG_FAT_DEFAULT_CODEPAGE=437 | ||
988 | CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | ||
989 | # CONFIG_NTFS_FS is not set | ||
990 | |||
991 | # | ||
992 | # Pseudo filesystems | ||
993 | # | ||
994 | CONFIG_PROC_FS=y | ||
995 | # CONFIG_PROC_KCORE is not set | ||
996 | CONFIG_PROC_SYSCTL=y | ||
997 | CONFIG_PROC_PAGE_MONITOR=y | ||
998 | CONFIG_SYSFS=y | ||
999 | CONFIG_TMPFS=y | ||
1000 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
1001 | CONFIG_HUGETLBFS=y | ||
1002 | CONFIG_HUGETLB_PAGE=y | ||
1003 | # CONFIG_CONFIGFS_FS is not set | ||
1004 | CONFIG_MISC_FILESYSTEMS=y | ||
1005 | # CONFIG_ADFS_FS is not set | ||
1006 | # CONFIG_AFFS_FS is not set | ||
1007 | # CONFIG_HFS_FS is not set | ||
1008 | # CONFIG_HFSPLUS_FS is not set | ||
1009 | # CONFIG_BEFS_FS is not set | ||
1010 | # CONFIG_BFS_FS is not set | ||
1011 | # CONFIG_EFS_FS is not set | ||
1012 | # CONFIG_LOGFS is not set | ||
1013 | # CONFIG_CRAMFS is not set | ||
1014 | # CONFIG_SQUASHFS is not set | ||
1015 | # CONFIG_VXFS_FS is not set | ||
1016 | # CONFIG_MINIX_FS is not set | ||
1017 | # CONFIG_OMFS_FS is not set | ||
1018 | # CONFIG_HPFS_FS is not set | ||
1019 | # CONFIG_QNX4FS_FS is not set | ||
1020 | # CONFIG_ROMFS_FS is not set | ||
1021 | # CONFIG_SYSV_FS is not set | ||
1022 | # CONFIG_UFS_FS is not set | ||
1023 | CONFIG_NETWORK_FILESYSTEMS=y | ||
1024 | CONFIG_NFS_FS=m | ||
1025 | CONFIG_NFS_V3=y | ||
1026 | # CONFIG_NFS_V3_ACL is not set | ||
1027 | # CONFIG_NFS_V4 is not set | ||
1028 | # CONFIG_NFSD is not set | ||
1029 | CONFIG_LOCKD=m | ||
1030 | CONFIG_LOCKD_V4=y | ||
1031 | CONFIG_NFS_COMMON=y | ||
1032 | CONFIG_SUNRPC=m | ||
1033 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
1034 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
1035 | # CONFIG_SMB_FS is not set | ||
1036 | # CONFIG_CEPH_FS is not set | ||
1037 | # CONFIG_CIFS is not set | ||
1038 | # CONFIG_NCP_FS is not set | ||
1039 | # CONFIG_CODA_FS is not set | ||
1040 | # CONFIG_AFS_FS is not set | ||
1041 | |||
1042 | # | ||
1043 | # Partition Types | ||
1044 | # | ||
1045 | # CONFIG_PARTITION_ADVANCED is not set | ||
1046 | CONFIG_MSDOS_PARTITION=y | ||
1047 | CONFIG_NLS=y | ||
1048 | CONFIG_NLS_DEFAULT="iso8859-1" | ||
1049 | CONFIG_NLS_CODEPAGE_437=y | ||
1050 | # CONFIG_NLS_CODEPAGE_737 is not set | ||
1051 | # CONFIG_NLS_CODEPAGE_775 is not set | ||
1052 | # CONFIG_NLS_CODEPAGE_850 is not set | ||
1053 | # CONFIG_NLS_CODEPAGE_852 is not set | ||
1054 | # CONFIG_NLS_CODEPAGE_855 is not set | ||
1055 | # CONFIG_NLS_CODEPAGE_857 is not set | ||
1056 | # CONFIG_NLS_CODEPAGE_860 is not set | ||
1057 | # CONFIG_NLS_CODEPAGE_861 is not set | ||
1058 | # CONFIG_NLS_CODEPAGE_862 is not set | ||
1059 | # CONFIG_NLS_CODEPAGE_863 is not set | ||
1060 | # CONFIG_NLS_CODEPAGE_864 is not set | ||
1061 | # CONFIG_NLS_CODEPAGE_865 is not set | ||
1062 | # CONFIG_NLS_CODEPAGE_866 is not set | ||
1063 | # CONFIG_NLS_CODEPAGE_869 is not set | ||
1064 | # CONFIG_NLS_CODEPAGE_936 is not set | ||
1065 | # CONFIG_NLS_CODEPAGE_950 is not set | ||
1066 | # CONFIG_NLS_CODEPAGE_932 is not set | ||
1067 | # CONFIG_NLS_CODEPAGE_949 is not set | ||
1068 | # CONFIG_NLS_CODEPAGE_874 is not set | ||
1069 | # CONFIG_NLS_ISO8859_8 is not set | ||
1070 | # CONFIG_NLS_CODEPAGE_1250 is not set | ||
1071 | # CONFIG_NLS_CODEPAGE_1251 is not set | ||
1072 | # CONFIG_NLS_ASCII is not set | ||
1073 | CONFIG_NLS_ISO8859_1=y | ||
1074 | # CONFIG_NLS_ISO8859_2 is not set | ||
1075 | # CONFIG_NLS_ISO8859_3 is not set | ||
1076 | # CONFIG_NLS_ISO8859_4 is not set | ||
1077 | # CONFIG_NLS_ISO8859_5 is not set | ||
1078 | # CONFIG_NLS_ISO8859_6 is not set | ||
1079 | # CONFIG_NLS_ISO8859_7 is not set | ||
1080 | # CONFIG_NLS_ISO8859_9 is not set | ||
1081 | # CONFIG_NLS_ISO8859_13 is not set | ||
1082 | # CONFIG_NLS_ISO8859_14 is not set | ||
1083 | # CONFIG_NLS_ISO8859_15 is not set | ||
1084 | # CONFIG_NLS_KOI8_R is not set | ||
1085 | # CONFIG_NLS_KOI8_U is not set | ||
1086 | # CONFIG_NLS_UTF8 is not set | ||
1087 | # CONFIG_DLM is not set | ||
1088 | |||
1089 | # | ||
1090 | # Kernel hacking | ||
1091 | # | ||
1092 | # CONFIG_PRINTK_TIME is not set | ||
1093 | CONFIG_ENABLE_WARN_DEPRECATED=y | ||
1094 | CONFIG_ENABLE_MUST_CHECK=y | ||
1095 | CONFIG_FRAME_WARN=2048 | ||
1096 | CONFIG_MAGIC_SYSRQ=y | ||
1097 | # CONFIG_STRIP_ASM_SYMS is not set | ||
1098 | # CONFIG_UNUSED_SYMBOLS is not set | ||
1099 | # CONFIG_DEBUG_FS is not set | ||
1100 | # CONFIG_HEADERS_CHECK is not set | ||
1101 | CONFIG_DEBUG_KERNEL=y | ||
1102 | # CONFIG_DEBUG_SHIRQ is not set | ||
1103 | CONFIG_DETECT_SOFTLOCKUP=y | ||
1104 | # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set | ||
1105 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 | ||
1106 | CONFIG_DETECT_HUNG_TASK=y | ||
1107 | # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set | ||
1108 | CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 | ||
1109 | CONFIG_SCHED_DEBUG=y | ||
1110 | # CONFIG_SCHEDSTATS is not set | ||
1111 | # CONFIG_TIMER_STATS is not set | ||
1112 | # CONFIG_DEBUG_OBJECTS is not set | ||
1113 | # CONFIG_SLUB_DEBUG_ON is not set | ||
1114 | # CONFIG_SLUB_STATS is not set | ||
1115 | # CONFIG_DEBUG_RT_MUTEXES is not set | ||
1116 | # CONFIG_RT_MUTEX_TESTER is not set | ||
1117 | # CONFIG_DEBUG_SPINLOCK is not set | ||
1118 | # CONFIG_DEBUG_MUTEXES is not set | ||
1119 | # CONFIG_DEBUG_LOCK_ALLOC is not set | ||
1120 | # CONFIG_PROVE_LOCKING is not set | ||
1121 | # CONFIG_LOCK_STAT is not set | ||
1122 | CONFIG_DEBUG_SPINLOCK_SLEEP=y | ||
1123 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | ||
1124 | CONFIG_STACKTRACE=y | ||
1125 | # CONFIG_DEBUG_KOBJECT is not set | ||
1126 | # CONFIG_DEBUG_HIGHMEM is not set | ||
1127 | CONFIG_DEBUG_INFO=y | ||
1128 | CONFIG_DEBUG_VM=y | ||
1129 | # CONFIG_DEBUG_WRITECOUNT is not set | ||
1130 | # CONFIG_DEBUG_MEMORY_INIT is not set | ||
1131 | # CONFIG_DEBUG_LIST is not set | ||
1132 | # CONFIG_DEBUG_SG is not set | ||
1133 | # CONFIG_DEBUG_NOTIFIERS is not set | ||
1134 | # CONFIG_DEBUG_CREDENTIALS is not set | ||
1135 | # CONFIG_RCU_TORTURE_TEST is not set | ||
1136 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
1137 | # CONFIG_BACKTRACE_SELF_TEST is not set | ||
1138 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | ||
1139 | # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set | ||
1140 | # CONFIG_FAULT_INJECTION is not set | ||
1141 | # CONFIG_SYSCTL_SYSCALL_CHECK is not set | ||
1142 | # CONFIG_PAGE_POISONING is not set | ||
1143 | CONFIG_RING_BUFFER=y | ||
1144 | CONFIG_RING_BUFFER_ALLOW_SWAP=y | ||
1145 | CONFIG_TRACING_SUPPORT=y | ||
1146 | CONFIG_FTRACE=y | ||
1147 | # CONFIG_IRQSOFF_TRACER is not set | ||
1148 | # CONFIG_SCHED_TRACER is not set | ||
1149 | # CONFIG_ENABLE_DEFAULT_TRACERS is not set | ||
1150 | # CONFIG_BOOT_TRACER is not set | ||
1151 | CONFIG_BRANCH_PROFILE_NONE=y | ||
1152 | # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set | ||
1153 | # CONFIG_PROFILE_ALL_BRANCHES is not set | ||
1154 | # CONFIG_KMEMTRACE is not set | ||
1155 | # CONFIG_WORKQUEUE_TRACER is not set | ||
1156 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
1157 | # CONFIG_RING_BUFFER_BENCHMARK is not set | ||
1158 | # CONFIG_SAMPLES is not set | ||
1159 | CONFIG_EARLY_PRINTK=y | ||
1160 | CONFIG_DEBUG_STACKOVERFLOW=y | ||
1161 | # CONFIG_DEBUG_STACK_USAGE is not set | ||
1162 | CONFIG_DEBUG_EXTRA_FLAGS="-femit-struct-debug-baseonly" | ||
1163 | |||
1164 | # | ||
1165 | # Security options | ||
1166 | # | ||
1167 | # CONFIG_KEYS is not set | ||
1168 | # CONFIG_SECURITY is not set | ||
1169 | # CONFIG_SECURITYFS is not set | ||
1170 | # CONFIG_DEFAULT_SECURITY_SELINUX is not set | ||
1171 | # CONFIG_DEFAULT_SECURITY_SMACK is not set | ||
1172 | # CONFIG_DEFAULT_SECURITY_TOMOYO is not set | ||
1173 | CONFIG_DEFAULT_SECURITY_DAC=y | ||
1174 | CONFIG_DEFAULT_SECURITY="" | ||
1175 | CONFIG_CRYPTO=y | ||
1176 | |||
1177 | # | ||
1178 | # Crypto core or helper | ||
1179 | # | ||
1180 | # CONFIG_CRYPTO_FIPS is not set | ||
1181 | CONFIG_CRYPTO_ALGAPI=m | ||
1182 | CONFIG_CRYPTO_ALGAPI2=m | ||
1183 | CONFIG_CRYPTO_RNG=m | ||
1184 | CONFIG_CRYPTO_RNG2=m | ||
1185 | # CONFIG_CRYPTO_MANAGER is not set | ||
1186 | # CONFIG_CRYPTO_MANAGER2 is not set | ||
1187 | # CONFIG_CRYPTO_GF128MUL is not set | ||
1188 | # CONFIG_CRYPTO_NULL is not set | ||
1189 | # CONFIG_CRYPTO_PCRYPT is not set | ||
1190 | # CONFIG_CRYPTO_CRYPTD is not set | ||
1191 | # CONFIG_CRYPTO_AUTHENC is not set | ||
1192 | # CONFIG_CRYPTO_TEST is not set | ||
1193 | |||
1194 | # | ||
1195 | # Authenticated Encryption with Associated Data | ||
1196 | # | ||
1197 | # CONFIG_CRYPTO_CCM is not set | ||
1198 | # CONFIG_CRYPTO_GCM is not set | ||
1199 | # CONFIG_CRYPTO_SEQIV is not set | ||
1200 | |||
1201 | # | ||
1202 | # Block modes | ||
1203 | # | ||
1204 | # CONFIG_CRYPTO_CBC is not set | ||
1205 | # CONFIG_CRYPTO_CTR is not set | ||
1206 | # CONFIG_CRYPTO_CTS is not set | ||
1207 | # CONFIG_CRYPTO_ECB is not set | ||
1208 | # CONFIG_CRYPTO_LRW is not set | ||
1209 | # CONFIG_CRYPTO_PCBC is not set | ||
1210 | # CONFIG_CRYPTO_XTS is not set | ||
1211 | |||
1212 | # | ||
1213 | # Hash modes | ||
1214 | # | ||
1215 | # CONFIG_CRYPTO_HMAC is not set | ||
1216 | # CONFIG_CRYPTO_XCBC is not set | ||
1217 | # CONFIG_CRYPTO_VMAC is not set | ||
1218 | |||
1219 | # | ||
1220 | # Digest | ||
1221 | # | ||
1222 | # CONFIG_CRYPTO_CRC32C is not set | ||
1223 | # CONFIG_CRYPTO_GHASH is not set | ||
1224 | # CONFIG_CRYPTO_MD4 is not set | ||
1225 | # CONFIG_CRYPTO_MD5 is not set | ||
1226 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
1227 | # CONFIG_CRYPTO_RMD128 is not set | ||
1228 | # CONFIG_CRYPTO_RMD160 is not set | ||
1229 | # CONFIG_CRYPTO_RMD256 is not set | ||
1230 | # CONFIG_CRYPTO_RMD320 is not set | ||
1231 | # CONFIG_CRYPTO_SHA1 is not set | ||
1232 | # CONFIG_CRYPTO_SHA256 is not set | ||
1233 | # CONFIG_CRYPTO_SHA512 is not set | ||
1234 | # CONFIG_CRYPTO_TGR192 is not set | ||
1235 | # CONFIG_CRYPTO_WP512 is not set | ||
1236 | |||
1237 | # | ||
1238 | # Ciphers | ||
1239 | # | ||
1240 | CONFIG_CRYPTO_AES=m | ||
1241 | # CONFIG_CRYPTO_ANUBIS is not set | ||
1242 | # CONFIG_CRYPTO_ARC4 is not set | ||
1243 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
1244 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
1245 | # CONFIG_CRYPTO_CAST5 is not set | ||
1246 | # CONFIG_CRYPTO_CAST6 is not set | ||
1247 | # CONFIG_CRYPTO_DES is not set | ||
1248 | # CONFIG_CRYPTO_FCRYPT is not set | ||
1249 | # CONFIG_CRYPTO_KHAZAD is not set | ||
1250 | # CONFIG_CRYPTO_SALSA20 is not set | ||
1251 | # CONFIG_CRYPTO_SEED is not set | ||
1252 | # CONFIG_CRYPTO_SERPENT is not set | ||
1253 | # CONFIG_CRYPTO_TEA is not set | ||
1254 | # CONFIG_CRYPTO_TWOFISH is not set | ||
1255 | |||
1256 | # | ||
1257 | # Compression | ||
1258 | # | ||
1259 | # CONFIG_CRYPTO_DEFLATE is not set | ||
1260 | # CONFIG_CRYPTO_ZLIB is not set | ||
1261 | # CONFIG_CRYPTO_LZO is not set | ||
1262 | |||
1263 | # | ||
1264 | # Random Number Generation | ||
1265 | # | ||
1266 | CONFIG_CRYPTO_ANSI_CPRNG=m | ||
1267 | CONFIG_CRYPTO_HW=y | ||
1268 | # CONFIG_CRYPTO_DEV_HIFN_795X is not set | ||
1269 | # CONFIG_BINARY_PRINTF is not set | ||
1270 | |||
1271 | # | ||
1272 | # Library routines | ||
1273 | # | ||
1274 | CONFIG_BITREVERSE=y | ||
1275 | CONFIG_GENERIC_FIND_FIRST_BIT=y | ||
1276 | CONFIG_GENERIC_FIND_NEXT_BIT=y | ||
1277 | CONFIG_GENERIC_FIND_LAST_BIT=y | ||
1278 | # CONFIG_CRC_CCITT is not set | ||
1279 | # CONFIG_CRC16 is not set | ||
1280 | # CONFIG_CRC_T10DIF is not set | ||
1281 | # CONFIG_CRC_ITU_T is not set | ||
1282 | CONFIG_CRC32=y | ||
1283 | # CONFIG_CRC7 is not set | ||
1284 | # CONFIG_LIBCRC32C is not set | ||
1285 | CONFIG_ZLIB_INFLATE=y | ||
1286 | CONFIG_DECOMPRESS_GZIP=y | ||
1287 | CONFIG_HAS_IOMEM=y | ||
1288 | CONFIG_HAS_IOPORT=y | ||
1289 | CONFIG_HAS_DMA=y | ||
1290 | CONFIG_NLATTR=y | ||
diff --git a/arch/tile/include/arch/abi.h b/arch/tile/include/arch/abi.h new file mode 100644 index 000000000000..da8df5b9d914 --- /dev/null +++ b/arch/tile/include/arch/abi.h | |||
@@ -0,0 +1,98 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file | ||
17 | * | ||
18 | * ABI-related register definitions helpful when writing assembly code. | ||
19 | */ | ||
20 | |||
21 | #ifndef __ARCH_ABI_H__ | ||
22 | #define __ARCH_ABI_H__ | ||
23 | |||
24 | #include <arch/chip.h> | ||
25 | |||
26 | /* Registers 0 - 55 are "normal", but some perform special roles. */ | ||
27 | |||
28 | #define TREG_FP 52 /**< Frame pointer. */ | ||
29 | #define TREG_TP 53 /**< Thread pointer. */ | ||
30 | #define TREG_SP 54 /**< Stack pointer. */ | ||
31 | #define TREG_LR 55 /**< Link to calling function PC. */ | ||
32 | |||
33 | /** Index of last normal general-purpose register. */ | ||
34 | #define TREG_LAST_GPR 55 | ||
35 | |||
36 | /* Registers 56 - 62 are "special" network registers. */ | ||
37 | |||
38 | #define TREG_SN 56 /**< Static network access. */ | ||
39 | #define TREG_IDN0 57 /**< IDN demux 0 access. */ | ||
40 | #define TREG_IDN1 58 /**< IDN demux 1 access. */ | ||
41 | #define TREG_UDN0 59 /**< UDN demux 0 access. */ | ||
42 | #define TREG_UDN1 60 /**< UDN demux 1 access. */ | ||
43 | #define TREG_UDN2 61 /**< UDN demux 2 access. */ | ||
44 | #define TREG_UDN3 62 /**< UDN demux 3 access. */ | ||
45 | |||
46 | /* Register 63 is the "special" zero register. */ | ||
47 | |||
48 | #define TREG_ZERO 63 /**< "Zero" register; always reads as "0". */ | ||
49 | |||
50 | |||
51 | /** By convention, this register is used to hold the syscall number. */ | ||
52 | #define TREG_SYSCALL_NR 10 | ||
53 | |||
54 | /** Name of register that holds the syscall number, for use in assembly. */ | ||
55 | #define TREG_SYSCALL_NR_NAME r10 | ||
56 | |||
57 | |||
58 | /** | ||
59 | * The ABI requires callers to allocate a caller state save area of | ||
60 | * this many bytes at the bottom of each stack frame. | ||
61 | */ | ||
62 | #ifdef __tile__ | ||
63 | #define C_ABI_SAVE_AREA_SIZE (2 * __SIZEOF_POINTER__) | ||
64 | #endif | ||
65 | |||
66 | /** | ||
67 | * The operand to an 'info' opcode directing the backtracer to not | ||
68 | * try to find the calling frame. | ||
69 | */ | ||
70 | #define INFO_OP_CANNOT_BACKTRACE 2 | ||
71 | |||
72 | #ifndef __ASSEMBLER__ | ||
73 | #if CHIP_WORD_SIZE() > 32 | ||
74 | |||
75 | /** Unsigned type that can hold a register. */ | ||
76 | typedef unsigned long long uint_reg_t; | ||
77 | |||
78 | /** Signed type that can hold a register. */ | ||
79 | typedef long long int_reg_t; | ||
80 | |||
81 | /** String prefix to use for printf(). */ | ||
82 | #define INT_REG_FMT "ll" | ||
83 | |||
84 | #elif !defined(__LP64__) /* avoid confusion with LP64 cross-build tools */ | ||
85 | |||
86 | /** Unsigned type that can hold a register. */ | ||
87 | typedef unsigned long uint_reg_t; | ||
88 | |||
89 | /** Signed type that can hold a register. */ | ||
90 | typedef long int_reg_t; | ||
91 | |||
92 | /** String prefix to use for printf(). */ | ||
93 | #define INT_REG_FMT "l" | ||
94 | |||
95 | #endif | ||
96 | #endif /* __ASSEMBLER__ */ | ||
97 | |||
98 | #endif /* !__ARCH_ABI_H__ */ | ||
diff --git a/arch/tile/include/arch/chip.h b/arch/tile/include/arch/chip.h new file mode 100644 index 000000000000..926d3db0e91e --- /dev/null +++ b/arch/tile/include/arch/chip.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #if __tile_chip__ == 0 | ||
16 | #include <arch/chip_tile64.h> | ||
17 | #elif __tile_chip__ == 1 | ||
18 | #include <arch/chip_tilepro.h> | ||
19 | #elif defined(__tilegx__) | ||
20 | #include <arch/chip_tilegx.h> | ||
21 | #else | ||
22 | #error Unexpected Tilera chip type | ||
23 | #endif | ||
diff --git a/arch/tile/include/arch/chip_tile64.h b/arch/tile/include/arch/chip_tile64.h new file mode 100644 index 000000000000..1246573be59e --- /dev/null +++ b/arch/tile/include/arch/chip_tile64.h | |||
@@ -0,0 +1,255 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file | ||
17 | * Global header file. | ||
18 | * This header file specifies defines for TILE64. | ||
19 | */ | ||
20 | |||
21 | #ifndef __ARCH_CHIP_H__ | ||
22 | #define __ARCH_CHIP_H__ | ||
23 | |||
24 | /** Specify chip version. | ||
25 | * When possible, prefer the CHIP_xxx symbols below for future-proofing. | ||
26 | * This is intended for cross-compiling; native compilation should | ||
27 | * use the predefined __tile_chip__ symbol. | ||
28 | */ | ||
29 | #define TILE_CHIP 0 | ||
30 | |||
31 | /** Specify chip revision. | ||
32 | * This provides for the case of a respin of a particular chip type; | ||
33 | * the normal value for this symbol is "0". | ||
34 | * This is intended for cross-compiling; native compilation should | ||
35 | * use the predefined __tile_chip_rev__ symbol. | ||
36 | */ | ||
37 | #define TILE_CHIP_REV 0 | ||
38 | |||
39 | /** The name of this architecture. */ | ||
40 | #define CHIP_ARCH_NAME "tile64" | ||
41 | |||
42 | /** The ELF e_machine type for binaries for this chip. */ | ||
43 | #define CHIP_ELF_TYPE() EM_TILE64 | ||
44 | |||
45 | /** The alternate ELF e_machine type for binaries for this chip. */ | ||
46 | #define CHIP_COMPAT_ELF_TYPE() 0x2506 | ||
47 | |||
48 | /** What is the native word size of the machine? */ | ||
49 | #define CHIP_WORD_SIZE() 32 | ||
50 | |||
51 | /** How many bits of a virtual address are used. Extra bits must be | ||
52 | * the sign extension of the low bits. | ||
53 | */ | ||
54 | #define CHIP_VA_WIDTH() 32 | ||
55 | |||
56 | /** How many bits are in a physical address? */ | ||
57 | #define CHIP_PA_WIDTH() 36 | ||
58 | |||
59 | /** Size of the L2 cache, in bytes. */ | ||
60 | #define CHIP_L2_CACHE_SIZE() 65536 | ||
61 | |||
62 | /** Log size of an L2 cache line in bytes. */ | ||
63 | #define CHIP_L2_LOG_LINE_SIZE() 6 | ||
64 | |||
65 | /** Size of an L2 cache line, in bytes. */ | ||
66 | #define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE()) | ||
67 | |||
68 | /** Associativity of the L2 cache. */ | ||
69 | #define CHIP_L2_ASSOC() 2 | ||
70 | |||
71 | /** Size of the L1 data cache, in bytes. */ | ||
72 | #define CHIP_L1D_CACHE_SIZE() 8192 | ||
73 | |||
74 | /** Log size of an L1 data cache line in bytes. */ | ||
75 | #define CHIP_L1D_LOG_LINE_SIZE() 4 | ||
76 | |||
77 | /** Size of an L1 data cache line, in bytes. */ | ||
78 | #define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE()) | ||
79 | |||
80 | /** Associativity of the L1 data cache. */ | ||
81 | #define CHIP_L1D_ASSOC() 2 | ||
82 | |||
83 | /** Size of the L1 instruction cache, in bytes. */ | ||
84 | #define CHIP_L1I_CACHE_SIZE() 8192 | ||
85 | |||
86 | /** Log size of an L1 instruction cache line in bytes. */ | ||
87 | #define CHIP_L1I_LOG_LINE_SIZE() 6 | ||
88 | |||
89 | /** Size of an L1 instruction cache line, in bytes. */ | ||
90 | #define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE()) | ||
91 | |||
92 | /** Associativity of the L1 instruction cache. */ | ||
93 | #define CHIP_L1I_ASSOC() 1 | ||
94 | |||
95 | /** Stride with which flush instructions must be issued. */ | ||
96 | #define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE() | ||
97 | |||
98 | /** Stride with which inv instructions must be issued. */ | ||
99 | #define CHIP_INV_STRIDE() CHIP_L1D_LINE_SIZE() | ||
100 | |||
101 | /** Stride with which finv instructions must be issued. */ | ||
102 | #define CHIP_FINV_STRIDE() CHIP_L1D_LINE_SIZE() | ||
103 | |||
104 | /** Can the local cache coherently cache data that is homed elsewhere? */ | ||
105 | #define CHIP_HAS_COHERENT_LOCAL_CACHE() 0 | ||
106 | |||
107 | /** How many simultaneous outstanding victims can the L2 cache have? */ | ||
108 | #define CHIP_MAX_OUTSTANDING_VICTIMS() 2 | ||
109 | |||
110 | /** Does the TLB support the NC and NOALLOC bits? */ | ||
111 | #define CHIP_HAS_NC_AND_NOALLOC_BITS() 0 | ||
112 | |||
113 | /** Does the chip support hash-for-home caching? */ | ||
114 | #define CHIP_HAS_CBOX_HOME_MAP() 0 | ||
115 | |||
116 | /** Number of entries in the chip's home map tables. */ | ||
117 | /* #define CHIP_CBOX_HOME_MAP_SIZE() -- does not apply to chip 0 */ | ||
118 | |||
119 | /** Do uncacheable requests miss in the cache regardless of whether | ||
120 | * there is matching data? */ | ||
121 | #define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 0 | ||
122 | |||
123 | /** Does the mf instruction wait for victims? */ | ||
124 | #define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 1 | ||
125 | |||
126 | /** Does the chip have an "inv" instruction that doesn't also flush? */ | ||
127 | #define CHIP_HAS_INV() 0 | ||
128 | |||
129 | /** Does the chip have a "wh64" instruction? */ | ||
130 | #define CHIP_HAS_WH64() 0 | ||
131 | |||
132 | /** Does this chip have a 'dword_align' instruction? */ | ||
133 | #define CHIP_HAS_DWORD_ALIGN() 0 | ||
134 | |||
135 | /** Number of performance counters. */ | ||
136 | #define CHIP_PERFORMANCE_COUNTERS() 2 | ||
137 | |||
138 | /** Does this chip have auxiliary performance counters? */ | ||
139 | #define CHIP_HAS_AUX_PERF_COUNTERS() 0 | ||
140 | |||
141 | /** Is the CBOX_MSR1 SPR supported? */ | ||
142 | #define CHIP_HAS_CBOX_MSR1() 0 | ||
143 | |||
144 | /** Is the TILE_RTF_HWM SPR supported? */ | ||
145 | #define CHIP_HAS_TILE_RTF_HWM() 0 | ||
146 | |||
147 | /** Is the TILE_WRITE_PENDING SPR supported? */ | ||
148 | #define CHIP_HAS_TILE_WRITE_PENDING() 0 | ||
149 | |||
150 | /** Is the PROC_STATUS SPR supported? */ | ||
151 | #define CHIP_HAS_PROC_STATUS_SPR() 0 | ||
152 | |||
153 | /** Log of the number of mshims we have. */ | ||
154 | #define CHIP_LOG_NUM_MSHIMS() 2 | ||
155 | |||
156 | /** Are the bases of the interrupt vector areas fixed? */ | ||
157 | #define CHIP_HAS_FIXED_INTVEC_BASE() 1 | ||
158 | |||
159 | /** Are the interrupt masks split up into 2 SPRs? */ | ||
160 | #define CHIP_HAS_SPLIT_INTR_MASK() 1 | ||
161 | |||
162 | /** Is the cycle count split up into 2 SPRs? */ | ||
163 | #define CHIP_HAS_SPLIT_CYCLE() 1 | ||
164 | |||
165 | /** Does the chip have a static network? */ | ||
166 | #define CHIP_HAS_SN() 1 | ||
167 | |||
168 | /** Does the chip have a static network processor? */ | ||
169 | #define CHIP_HAS_SN_PROC() 1 | ||
170 | |||
171 | /** Size of the L1 static network processor instruction cache, in bytes. */ | ||
172 | #define CHIP_L1SNI_CACHE_SIZE() 2048 | ||
173 | |||
174 | /** Does the chip have DMA support in each tile? */ | ||
175 | #define CHIP_HAS_TILE_DMA() 1 | ||
176 | |||
177 | /** Does the chip have the second revision of the directly accessible | ||
178 | * dynamic networks? This encapsulates a number of characteristics, | ||
179 | * including the absence of the catch-all, the absence of inline message | ||
180 | * tags, the absence of support for network context-switching, and so on. | ||
181 | */ | ||
182 | #define CHIP_HAS_REV1_XDN() 0 | ||
183 | |||
184 | /** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */ | ||
185 | #define CHIP_HAS_CMPEXCH() 0 | ||
186 | |||
187 | /** Does the chip have memory-mapped I/O support? */ | ||
188 | #define CHIP_HAS_MMIO() 0 | ||
189 | |||
190 | /** Does the chip have post-completion interrupts? */ | ||
191 | #define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0 | ||
192 | |||
193 | /** Does the chip have native single step support? */ | ||
194 | #define CHIP_HAS_SINGLE_STEP() 0 | ||
195 | |||
196 | #ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */ | ||
197 | |||
198 | /** How many entries are present in the instruction TLB? */ | ||
199 | #define CHIP_ITLB_ENTRIES() 8 | ||
200 | |||
201 | /** How many entries are present in the data TLB? */ | ||
202 | #define CHIP_DTLB_ENTRIES() 16 | ||
203 | |||
204 | /** How many MAF entries does the XAUI shim have? */ | ||
205 | #define CHIP_XAUI_MAF_ENTRIES() 16 | ||
206 | |||
207 | /** Does the memory shim have a source-id table? */ | ||
208 | #define CHIP_HAS_MSHIM_SRCID_TABLE() 1 | ||
209 | |||
210 | /** Does the L1 instruction cache clear on reset? */ | ||
211 | #define CHIP_HAS_L1I_CLEAR_ON_RESET() 0 | ||
212 | |||
213 | /** Does the chip come out of reset with valid coordinates on all tiles? | ||
214 | * Note that if defined, this also implies that the upper left is 1,1. | ||
215 | */ | ||
216 | #define CHIP_HAS_VALID_TILE_COORD_RESET() 0 | ||
217 | |||
218 | /** Does the chip have unified packet formats? */ | ||
219 | #define CHIP_HAS_UNIFIED_PACKET_FORMATS() 0 | ||
220 | |||
221 | /** Does the chip support write reordering? */ | ||
222 | #define CHIP_HAS_WRITE_REORDERING() 0 | ||
223 | |||
224 | /** Does the chip support Y-X routing as well as X-Y? */ | ||
225 | #define CHIP_HAS_Y_X_ROUTING() 0 | ||
226 | |||
227 | /** Is INTCTRL_3 managed with the correct MPL? */ | ||
228 | #define CHIP_HAS_INTCTRL_3_STATUS_FIX() 0 | ||
229 | |||
230 | /** Is it possible to configure the chip to be big-endian? */ | ||
231 | #define CHIP_HAS_BIG_ENDIAN_CONFIG() 0 | ||
232 | |||
233 | /** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */ | ||
234 | #define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0 | ||
235 | |||
236 | /** Is the DIAG_TRACE_WAY SPR supported? */ | ||
237 | #define CHIP_HAS_DIAG_TRACE_WAY() 0 | ||
238 | |||
239 | /** Is the MEM_STRIPE_CONFIG SPR supported? */ | ||
240 | #define CHIP_HAS_MEM_STRIPE_CONFIG() 0 | ||
241 | |||
242 | /** Are the TLB_PERF SPRs supported? */ | ||
243 | #define CHIP_HAS_TLB_PERF() 0 | ||
244 | |||
245 | /** Is the VDN_SNOOP_SHIM_CTL SPR supported? */ | ||
246 | #define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0 | ||
247 | |||
248 | /** Does the chip support rev1 DMA packets? */ | ||
249 | #define CHIP_HAS_REV1_DMA_PACKETS() 0 | ||
250 | |||
251 | /** Does the chip have an IPI shim? */ | ||
252 | #define CHIP_HAS_IPI() 0 | ||
253 | |||
254 | #endif /* !__OPEN_SOURCE__ */ | ||
255 | #endif /* __ARCH_CHIP_H__ */ | ||
diff --git a/arch/tile/include/arch/chip_tilepro.h b/arch/tile/include/arch/chip_tilepro.h new file mode 100644 index 000000000000..e864c47fc89c --- /dev/null +++ b/arch/tile/include/arch/chip_tilepro.h | |||
@@ -0,0 +1,255 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file | ||
17 | * Global header file. | ||
18 | * This header file specifies defines for TILEPro. | ||
19 | */ | ||
20 | |||
21 | #ifndef __ARCH_CHIP_H__ | ||
22 | #define __ARCH_CHIP_H__ | ||
23 | |||
24 | /** Specify chip version. | ||
25 | * When possible, prefer the CHIP_xxx symbols below for future-proofing. | ||
26 | * This is intended for cross-compiling; native compilation should | ||
27 | * use the predefined __tile_chip__ symbol. | ||
28 | */ | ||
29 | #define TILE_CHIP 1 | ||
30 | |||
31 | /** Specify chip revision. | ||
32 | * This provides for the case of a respin of a particular chip type; | ||
33 | * the normal value for this symbol is "0". | ||
34 | * This is intended for cross-compiling; native compilation should | ||
35 | * use the predefined __tile_chip_rev__ symbol. | ||
36 | */ | ||
37 | #define TILE_CHIP_REV 0 | ||
38 | |||
39 | /** The name of this architecture. */ | ||
40 | #define CHIP_ARCH_NAME "tilepro" | ||
41 | |||
42 | /** The ELF e_machine type for binaries for this chip. */ | ||
43 | #define CHIP_ELF_TYPE() EM_TILEPRO | ||
44 | |||
45 | /** The alternate ELF e_machine type for binaries for this chip. */ | ||
46 | #define CHIP_COMPAT_ELF_TYPE() 0x2507 | ||
47 | |||
48 | /** What is the native word size of the machine? */ | ||
49 | #define CHIP_WORD_SIZE() 32 | ||
50 | |||
51 | /** How many bits of a virtual address are used. Extra bits must be | ||
52 | * the sign extension of the low bits. | ||
53 | */ | ||
54 | #define CHIP_VA_WIDTH() 32 | ||
55 | |||
56 | /** How many bits are in a physical address? */ | ||
57 | #define CHIP_PA_WIDTH() 36 | ||
58 | |||
59 | /** Size of the L2 cache, in bytes. */ | ||
60 | #define CHIP_L2_CACHE_SIZE() 65536 | ||
61 | |||
62 | /** Log size of an L2 cache line in bytes. */ | ||
63 | #define CHIP_L2_LOG_LINE_SIZE() 6 | ||
64 | |||
65 | /** Size of an L2 cache line, in bytes. */ | ||
66 | #define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE()) | ||
67 | |||
68 | /** Associativity of the L2 cache. */ | ||
69 | #define CHIP_L2_ASSOC() 4 | ||
70 | |||
71 | /** Size of the L1 data cache, in bytes. */ | ||
72 | #define CHIP_L1D_CACHE_SIZE() 8192 | ||
73 | |||
74 | /** Log size of an L1 data cache line in bytes. */ | ||
75 | #define CHIP_L1D_LOG_LINE_SIZE() 4 | ||
76 | |||
77 | /** Size of an L1 data cache line, in bytes. */ | ||
78 | #define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE()) | ||
79 | |||
80 | /** Associativity of the L1 data cache. */ | ||
81 | #define CHIP_L1D_ASSOC() 2 | ||
82 | |||
83 | /** Size of the L1 instruction cache, in bytes. */ | ||
84 | #define CHIP_L1I_CACHE_SIZE() 16384 | ||
85 | |||
86 | /** Log size of an L1 instruction cache line in bytes. */ | ||
87 | #define CHIP_L1I_LOG_LINE_SIZE() 6 | ||
88 | |||
89 | /** Size of an L1 instruction cache line, in bytes. */ | ||
90 | #define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE()) | ||
91 | |||
92 | /** Associativity of the L1 instruction cache. */ | ||
93 | #define CHIP_L1I_ASSOC() 1 | ||
94 | |||
95 | /** Stride with which flush instructions must be issued. */ | ||
96 | #define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE() | ||
97 | |||
98 | /** Stride with which inv instructions must be issued. */ | ||
99 | #define CHIP_INV_STRIDE() CHIP_L2_LINE_SIZE() | ||
100 | |||
101 | /** Stride with which finv instructions must be issued. */ | ||
102 | #define CHIP_FINV_STRIDE() CHIP_L2_LINE_SIZE() | ||
103 | |||
104 | /** Can the local cache coherently cache data that is homed elsewhere? */ | ||
105 | #define CHIP_HAS_COHERENT_LOCAL_CACHE() 1 | ||
106 | |||
107 | /** How many simultaneous outstanding victims can the L2 cache have? */ | ||
108 | #define CHIP_MAX_OUTSTANDING_VICTIMS() 4 | ||
109 | |||
110 | /** Does the TLB support the NC and NOALLOC bits? */ | ||
111 | #define CHIP_HAS_NC_AND_NOALLOC_BITS() 1 | ||
112 | |||
113 | /** Does the chip support hash-for-home caching? */ | ||
114 | #define CHIP_HAS_CBOX_HOME_MAP() 1 | ||
115 | |||
116 | /** Number of entries in the chip's home map tables. */ | ||
117 | #define CHIP_CBOX_HOME_MAP_SIZE() 64 | ||
118 | |||
119 | /** Do uncacheable requests miss in the cache regardless of whether | ||
120 | * there is matching data? */ | ||
121 | #define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 1 | ||
122 | |||
123 | /** Does the mf instruction wait for victims? */ | ||
124 | #define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 0 | ||
125 | |||
126 | /** Does the chip have an "inv" instruction that doesn't also flush? */ | ||
127 | #define CHIP_HAS_INV() 1 | ||
128 | |||
129 | /** Does the chip have a "wh64" instruction? */ | ||
130 | #define CHIP_HAS_WH64() 1 | ||
131 | |||
132 | /** Does this chip have a 'dword_align' instruction? */ | ||
133 | #define CHIP_HAS_DWORD_ALIGN() 1 | ||
134 | |||
135 | /** Number of performance counters. */ | ||
136 | #define CHIP_PERFORMANCE_COUNTERS() 4 | ||
137 | |||
138 | /** Does this chip have auxiliary performance counters? */ | ||
139 | #define CHIP_HAS_AUX_PERF_COUNTERS() 1 | ||
140 | |||
141 | /** Is the CBOX_MSR1 SPR supported? */ | ||
142 | #define CHIP_HAS_CBOX_MSR1() 1 | ||
143 | |||
144 | /** Is the TILE_RTF_HWM SPR supported? */ | ||
145 | #define CHIP_HAS_TILE_RTF_HWM() 1 | ||
146 | |||
147 | /** Is the TILE_WRITE_PENDING SPR supported? */ | ||
148 | #define CHIP_HAS_TILE_WRITE_PENDING() 1 | ||
149 | |||
150 | /** Is the PROC_STATUS SPR supported? */ | ||
151 | #define CHIP_HAS_PROC_STATUS_SPR() 1 | ||
152 | |||
153 | /** Log of the number of mshims we have. */ | ||
154 | #define CHIP_LOG_NUM_MSHIMS() 2 | ||
155 | |||
156 | /** Are the bases of the interrupt vector areas fixed? */ | ||
157 | #define CHIP_HAS_FIXED_INTVEC_BASE() 1 | ||
158 | |||
159 | /** Are the interrupt masks split up into 2 SPRs? */ | ||
160 | #define CHIP_HAS_SPLIT_INTR_MASK() 1 | ||
161 | |||
162 | /** Is the cycle count split up into 2 SPRs? */ | ||
163 | #define CHIP_HAS_SPLIT_CYCLE() 1 | ||
164 | |||
165 | /** Does the chip have a static network? */ | ||
166 | #define CHIP_HAS_SN() 1 | ||
167 | |||
168 | /** Does the chip have a static network processor? */ | ||
169 | #define CHIP_HAS_SN_PROC() 0 | ||
170 | |||
171 | /** Size of the L1 static network processor instruction cache, in bytes. */ | ||
172 | /* #define CHIP_L1SNI_CACHE_SIZE() -- does not apply to chip 1 */ | ||
173 | |||
174 | /** Does the chip have DMA support in each tile? */ | ||
175 | #define CHIP_HAS_TILE_DMA() 1 | ||
176 | |||
177 | /** Does the chip have the second revision of the directly accessible | ||
178 | * dynamic networks? This encapsulates a number of characteristics, | ||
179 | * including the absence of the catch-all, the absence of inline message | ||
180 | * tags, the absence of support for network context-switching, and so on. | ||
181 | */ | ||
182 | #define CHIP_HAS_REV1_XDN() 0 | ||
183 | |||
184 | /** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */ | ||
185 | #define CHIP_HAS_CMPEXCH() 0 | ||
186 | |||
187 | /** Does the chip have memory-mapped I/O support? */ | ||
188 | #define CHIP_HAS_MMIO() 0 | ||
189 | |||
190 | /** Does the chip have post-completion interrupts? */ | ||
191 | #define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0 | ||
192 | |||
193 | /** Does the chip have native single step support? */ | ||
194 | #define CHIP_HAS_SINGLE_STEP() 0 | ||
195 | |||
196 | #ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */ | ||
197 | |||
198 | /** How many entries are present in the instruction TLB? */ | ||
199 | #define CHIP_ITLB_ENTRIES() 16 | ||
200 | |||
201 | /** How many entries are present in the data TLB? */ | ||
202 | #define CHIP_DTLB_ENTRIES() 16 | ||
203 | |||
204 | /** How many MAF entries does the XAUI shim have? */ | ||
205 | #define CHIP_XAUI_MAF_ENTRIES() 32 | ||
206 | |||
207 | /** Does the memory shim have a source-id table? */ | ||
208 | #define CHIP_HAS_MSHIM_SRCID_TABLE() 0 | ||
209 | |||
210 | /** Does the L1 instruction cache clear on reset? */ | ||
211 | #define CHIP_HAS_L1I_CLEAR_ON_RESET() 1 | ||
212 | |||
213 | /** Does the chip come out of reset with valid coordinates on all tiles? | ||
214 | * Note that if defined, this also implies that the upper left is 1,1. | ||
215 | */ | ||
216 | #define CHIP_HAS_VALID_TILE_COORD_RESET() 1 | ||
217 | |||
218 | /** Does the chip have unified packet formats? */ | ||
219 | #define CHIP_HAS_UNIFIED_PACKET_FORMATS() 1 | ||
220 | |||
221 | /** Does the chip support write reordering? */ | ||
222 | #define CHIP_HAS_WRITE_REORDERING() 1 | ||
223 | |||
224 | /** Does the chip support Y-X routing as well as X-Y? */ | ||
225 | #define CHIP_HAS_Y_X_ROUTING() 1 | ||
226 | |||
227 | /** Is INTCTRL_3 managed with the correct MPL? */ | ||
228 | #define CHIP_HAS_INTCTRL_3_STATUS_FIX() 1 | ||
229 | |||
230 | /** Is it possible to configure the chip to be big-endian? */ | ||
231 | #define CHIP_HAS_BIG_ENDIAN_CONFIG() 1 | ||
232 | |||
233 | /** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */ | ||
234 | #define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 1 | ||
235 | |||
236 | /** Is the DIAG_TRACE_WAY SPR supported? */ | ||
237 | #define CHIP_HAS_DIAG_TRACE_WAY() 1 | ||
238 | |||
239 | /** Is the MEM_STRIPE_CONFIG SPR supported? */ | ||
240 | #define CHIP_HAS_MEM_STRIPE_CONFIG() 1 | ||
241 | |||
242 | /** Are the TLB_PERF SPRs supported? */ | ||
243 | #define CHIP_HAS_TLB_PERF() 1 | ||
244 | |||
245 | /** Is the VDN_SNOOP_SHIM_CTL SPR supported? */ | ||
246 | #define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 1 | ||
247 | |||
248 | /** Does the chip support rev1 DMA packets? */ | ||
249 | #define CHIP_HAS_REV1_DMA_PACKETS() 1 | ||
250 | |||
251 | /** Does the chip have an IPI shim? */ | ||
252 | #define CHIP_HAS_IPI() 0 | ||
253 | |||
254 | #endif /* !__OPEN_SOURCE__ */ | ||
255 | #endif /* __ARCH_CHIP_H__ */ | ||
diff --git a/arch/tile/include/arch/icache.h b/arch/tile/include/arch/icache.h new file mode 100644 index 000000000000..5c87c9016338 --- /dev/null +++ b/arch/tile/include/arch/icache.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | /** | ||
17 | * @file | ||
18 | * | ||
19 | * Support for invalidating bytes in the instruction cache. | ||
20 | */ | ||
21 | |||
22 | #ifndef __ARCH_ICACHE_H__ | ||
23 | #define __ARCH_ICACHE_H__ | ||
24 | |||
25 | #include <arch/chip.h> | ||
26 | |||
27 | |||
28 | /** | ||
29 | * Invalidate the instruction cache for the given range of memory. | ||
30 | * | ||
31 | * @param addr The start of memory to be invalidated. | ||
32 | * @param size The number of bytes to be invalidated. | ||
33 | * @param page_size The system's page size, typically the PAGE_SIZE constant | ||
34 | * in sys/page.h. This value must be a power of two no larger | ||
35 | * than the page containing the code to be invalidated. If the value | ||
36 | * is smaller than the actual page size, this function will still | ||
37 | * work, but may run slower than necessary. | ||
38 | */ | ||
39 | static __inline void | ||
40 | invalidate_icache(const void* addr, unsigned long size, | ||
41 | unsigned long page_size) | ||
42 | { | ||
43 | const unsigned long cache_way_size = | ||
44 | CHIP_L1I_CACHE_SIZE() / CHIP_L1I_ASSOC(); | ||
45 | unsigned long max_useful_size; | ||
46 | const char* start, *end; | ||
47 | long num_passes; | ||
48 | |||
49 | if (__builtin_expect(size == 0, 0)) | ||
50 | return; | ||
51 | |||
52 | #ifdef __tilegx__ | ||
53 | /* Limit the number of bytes visited to avoid redundant iterations. */ | ||
54 | max_useful_size = (page_size < cache_way_size) ? page_size : cache_way_size; | ||
55 | |||
56 | /* No PA aliasing is possible, so one pass always suffices. */ | ||
57 | num_passes = 1; | ||
58 | #else | ||
59 | /* Limit the number of bytes visited to avoid redundant iterations. */ | ||
60 | max_useful_size = cache_way_size; | ||
61 | |||
62 | /* | ||
63 | * Compute how many passes we need (we'll treat 0 as if it were 1). | ||
64 | * This works because we know the page size is a power of two. | ||
65 | */ | ||
66 | num_passes = cache_way_size >> __builtin_ctzl(page_size); | ||
67 | #endif | ||
68 | |||
69 | if (__builtin_expect(size > max_useful_size, 0)) | ||
70 | size = max_useful_size; | ||
71 | |||
72 | /* Locate the first and last bytes to be invalidated. */ | ||
73 | start = (const char *)((unsigned long)addr & -CHIP_L1I_LINE_SIZE()); | ||
74 | end = (const char*)addr + size - 1; | ||
75 | |||
76 | __insn_mf(); | ||
77 | |||
78 | do | ||
79 | { | ||
80 | const char* p; | ||
81 | |||
82 | for (p = start; p <= end; p += CHIP_L1I_LINE_SIZE()) | ||
83 | __insn_icoh(p); | ||
84 | |||
85 | start += page_size; | ||
86 | end += page_size; | ||
87 | } | ||
88 | while (--num_passes > 0); | ||
89 | |||
90 | __insn_drain(); | ||
91 | } | ||
92 | |||
93 | |||
94 | #endif /* __ARCH_ICACHE_H__ */ | ||
diff --git a/arch/tile/include/arch/interrupts.h b/arch/tile/include/arch/interrupts.h new file mode 100644 index 000000000000..20f8f07d2de9 --- /dev/null +++ b/arch/tile/include/arch/interrupts.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifdef __tilegx__ | ||
16 | #include <arch/interrupts_64.h> | ||
17 | #else | ||
18 | #include <arch/interrupts_32.h> | ||
19 | #endif | ||
diff --git a/arch/tile/include/arch/interrupts_32.h b/arch/tile/include/arch/interrupts_32.h new file mode 100644 index 000000000000..9d0bfa7e59be --- /dev/null +++ b/arch/tile/include/arch/interrupts_32.h | |||
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef __ARCH_INTERRUPTS_H__ | ||
16 | #define __ARCH_INTERRUPTS_H__ | ||
17 | |||
18 | /** Mask for an interrupt. */ | ||
19 | #ifdef __ASSEMBLER__ | ||
20 | /* Note: must handle breaking interrupts into high and low words manually. */ | ||
21 | #define INT_MASK(intno) (1 << (intno)) | ||
22 | #else | ||
23 | #define INT_MASK(intno) (1ULL << (intno)) | ||
24 | #endif | ||
25 | |||
26 | |||
27 | /** Where a given interrupt executes */ | ||
28 | #define INTERRUPT_VECTOR(i, pl) (0xFC000000 + ((pl) << 24) + ((i) << 8)) | ||
29 | |||
30 | /** Where to store a vector for a given interrupt. */ | ||
31 | #define USER_INTERRUPT_VECTOR(i) INTERRUPT_VECTOR(i, 0) | ||
32 | |||
33 | /** The base address of user-level interrupts. */ | ||
34 | #define USER_INTERRUPT_VECTOR_BASE INTERRUPT_VECTOR(0, 0) | ||
35 | |||
36 | |||
37 | /** Additional synthetic interrupt. */ | ||
38 | #define INT_BREAKPOINT (63) | ||
39 | |||
40 | #define INT_ITLB_MISS 0 | ||
41 | #define INT_MEM_ERROR 1 | ||
42 | #define INT_ILL 2 | ||
43 | #define INT_GPV 3 | ||
44 | #define INT_SN_ACCESS 4 | ||
45 | #define INT_IDN_ACCESS 5 | ||
46 | #define INT_UDN_ACCESS 6 | ||
47 | #define INT_IDN_REFILL 7 | ||
48 | #define INT_UDN_REFILL 8 | ||
49 | #define INT_IDN_COMPLETE 9 | ||
50 | #define INT_UDN_COMPLETE 10 | ||
51 | #define INT_SWINT_3 11 | ||
52 | #define INT_SWINT_2 12 | ||
53 | #define INT_SWINT_1 13 | ||
54 | #define INT_SWINT_0 14 | ||
55 | #define INT_UNALIGN_DATA 15 | ||
56 | #define INT_DTLB_MISS 16 | ||
57 | #define INT_DTLB_ACCESS 17 | ||
58 | #define INT_DMATLB_MISS 18 | ||
59 | #define INT_DMATLB_ACCESS 19 | ||
60 | #define INT_SNITLB_MISS 20 | ||
61 | #define INT_SN_NOTIFY 21 | ||
62 | #define INT_SN_FIREWALL 22 | ||
63 | #define INT_IDN_FIREWALL 23 | ||
64 | #define INT_UDN_FIREWALL 24 | ||
65 | #define INT_TILE_TIMER 25 | ||
66 | #define INT_IDN_TIMER 26 | ||
67 | #define INT_UDN_TIMER 27 | ||
68 | #define INT_DMA_NOTIFY 28 | ||
69 | #define INT_IDN_CA 29 | ||
70 | #define INT_UDN_CA 30 | ||
71 | #define INT_IDN_AVAIL 31 | ||
72 | #define INT_UDN_AVAIL 32 | ||
73 | #define INT_PERF_COUNT 33 | ||
74 | #define INT_INTCTRL_3 34 | ||
75 | #define INT_INTCTRL_2 35 | ||
76 | #define INT_INTCTRL_1 36 | ||
77 | #define INT_INTCTRL_0 37 | ||
78 | #define INT_BOOT_ACCESS 38 | ||
79 | #define INT_WORLD_ACCESS 39 | ||
80 | #define INT_I_ASID 40 | ||
81 | #define INT_D_ASID 41 | ||
82 | #define INT_DMA_ASID 42 | ||
83 | #define INT_SNI_ASID 43 | ||
84 | #define INT_DMA_CPL 44 | ||
85 | #define INT_SN_CPL 45 | ||
86 | #define INT_DOUBLE_FAULT 46 | ||
87 | #define INT_SN_STATIC_ACCESS 47 | ||
88 | #define INT_AUX_PERF_COUNT 48 | ||
89 | |||
90 | #define NUM_INTERRUPTS 49 | ||
91 | |||
92 | #define QUEUED_INTERRUPTS ( \ | ||
93 | INT_MASK(INT_MEM_ERROR) | \ | ||
94 | INT_MASK(INT_DMATLB_MISS) | \ | ||
95 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
96 | INT_MASK(INT_SNITLB_MISS) | \ | ||
97 | INT_MASK(INT_SN_NOTIFY) | \ | ||
98 | INT_MASK(INT_SN_FIREWALL) | \ | ||
99 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
100 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
101 | INT_MASK(INT_TILE_TIMER) | \ | ||
102 | INT_MASK(INT_IDN_TIMER) | \ | ||
103 | INT_MASK(INT_UDN_TIMER) | \ | ||
104 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
105 | INT_MASK(INT_IDN_CA) | \ | ||
106 | INT_MASK(INT_UDN_CA) | \ | ||
107 | INT_MASK(INT_IDN_AVAIL) | \ | ||
108 | INT_MASK(INT_UDN_AVAIL) | \ | ||
109 | INT_MASK(INT_PERF_COUNT) | \ | ||
110 | INT_MASK(INT_INTCTRL_3) | \ | ||
111 | INT_MASK(INT_INTCTRL_2) | \ | ||
112 | INT_MASK(INT_INTCTRL_1) | \ | ||
113 | INT_MASK(INT_INTCTRL_0) | \ | ||
114 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
115 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
116 | INT_MASK(INT_I_ASID) | \ | ||
117 | INT_MASK(INT_D_ASID) | \ | ||
118 | INT_MASK(INT_DMA_ASID) | \ | ||
119 | INT_MASK(INT_SNI_ASID) | \ | ||
120 | INT_MASK(INT_DMA_CPL) | \ | ||
121 | INT_MASK(INT_SN_CPL) | \ | ||
122 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
123 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
124 | 0) | ||
125 | #define NONQUEUED_INTERRUPTS ( \ | ||
126 | INT_MASK(INT_ITLB_MISS) | \ | ||
127 | INT_MASK(INT_ILL) | \ | ||
128 | INT_MASK(INT_GPV) | \ | ||
129 | INT_MASK(INT_SN_ACCESS) | \ | ||
130 | INT_MASK(INT_IDN_ACCESS) | \ | ||
131 | INT_MASK(INT_UDN_ACCESS) | \ | ||
132 | INT_MASK(INT_IDN_REFILL) | \ | ||
133 | INT_MASK(INT_UDN_REFILL) | \ | ||
134 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
135 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
136 | INT_MASK(INT_SWINT_3) | \ | ||
137 | INT_MASK(INT_SWINT_2) | \ | ||
138 | INT_MASK(INT_SWINT_1) | \ | ||
139 | INT_MASK(INT_SWINT_0) | \ | ||
140 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
141 | INT_MASK(INT_DTLB_MISS) | \ | ||
142 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
143 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
144 | 0) | ||
145 | #define CRITICAL_MASKED_INTERRUPTS ( \ | ||
146 | INT_MASK(INT_MEM_ERROR) | \ | ||
147 | INT_MASK(INT_DMATLB_MISS) | \ | ||
148 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
149 | INT_MASK(INT_SNITLB_MISS) | \ | ||
150 | INT_MASK(INT_SN_NOTIFY) | \ | ||
151 | INT_MASK(INT_SN_FIREWALL) | \ | ||
152 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
153 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
154 | INT_MASK(INT_TILE_TIMER) | \ | ||
155 | INT_MASK(INT_IDN_TIMER) | \ | ||
156 | INT_MASK(INT_UDN_TIMER) | \ | ||
157 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
158 | INT_MASK(INT_IDN_CA) | \ | ||
159 | INT_MASK(INT_UDN_CA) | \ | ||
160 | INT_MASK(INT_IDN_AVAIL) | \ | ||
161 | INT_MASK(INT_UDN_AVAIL) | \ | ||
162 | INT_MASK(INT_PERF_COUNT) | \ | ||
163 | INT_MASK(INT_INTCTRL_3) | \ | ||
164 | INT_MASK(INT_INTCTRL_2) | \ | ||
165 | INT_MASK(INT_INTCTRL_1) | \ | ||
166 | INT_MASK(INT_INTCTRL_0) | \ | ||
167 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
168 | 0) | ||
169 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | ||
170 | INT_MASK(INT_ITLB_MISS) | \ | ||
171 | INT_MASK(INT_ILL) | \ | ||
172 | INT_MASK(INT_GPV) | \ | ||
173 | INT_MASK(INT_SN_ACCESS) | \ | ||
174 | INT_MASK(INT_IDN_ACCESS) | \ | ||
175 | INT_MASK(INT_UDN_ACCESS) | \ | ||
176 | INT_MASK(INT_IDN_REFILL) | \ | ||
177 | INT_MASK(INT_UDN_REFILL) | \ | ||
178 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
179 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
180 | INT_MASK(INT_SWINT_3) | \ | ||
181 | INT_MASK(INT_SWINT_2) | \ | ||
182 | INT_MASK(INT_SWINT_1) | \ | ||
183 | INT_MASK(INT_SWINT_0) | \ | ||
184 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
185 | INT_MASK(INT_DTLB_MISS) | \ | ||
186 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
187 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
188 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
189 | INT_MASK(INT_I_ASID) | \ | ||
190 | INT_MASK(INT_D_ASID) | \ | ||
191 | INT_MASK(INT_DMA_ASID) | \ | ||
192 | INT_MASK(INT_SNI_ASID) | \ | ||
193 | INT_MASK(INT_DMA_CPL) | \ | ||
194 | INT_MASK(INT_SN_CPL) | \ | ||
195 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
196 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
197 | 0) | ||
198 | #define MASKABLE_INTERRUPTS ( \ | ||
199 | INT_MASK(INT_MEM_ERROR) | \ | ||
200 | INT_MASK(INT_IDN_REFILL) | \ | ||
201 | INT_MASK(INT_UDN_REFILL) | \ | ||
202 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
203 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
204 | INT_MASK(INT_DMATLB_MISS) | \ | ||
205 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
206 | INT_MASK(INT_SNITLB_MISS) | \ | ||
207 | INT_MASK(INT_SN_NOTIFY) | \ | ||
208 | INT_MASK(INT_SN_FIREWALL) | \ | ||
209 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
210 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
211 | INT_MASK(INT_TILE_TIMER) | \ | ||
212 | INT_MASK(INT_IDN_TIMER) | \ | ||
213 | INT_MASK(INT_UDN_TIMER) | \ | ||
214 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
215 | INT_MASK(INT_IDN_CA) | \ | ||
216 | INT_MASK(INT_UDN_CA) | \ | ||
217 | INT_MASK(INT_IDN_AVAIL) | \ | ||
218 | INT_MASK(INT_UDN_AVAIL) | \ | ||
219 | INT_MASK(INT_PERF_COUNT) | \ | ||
220 | INT_MASK(INT_INTCTRL_3) | \ | ||
221 | INT_MASK(INT_INTCTRL_2) | \ | ||
222 | INT_MASK(INT_INTCTRL_1) | \ | ||
223 | INT_MASK(INT_INTCTRL_0) | \ | ||
224 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
225 | 0) | ||
226 | #define UNMASKABLE_INTERRUPTS ( \ | ||
227 | INT_MASK(INT_ITLB_MISS) | \ | ||
228 | INT_MASK(INT_ILL) | \ | ||
229 | INT_MASK(INT_GPV) | \ | ||
230 | INT_MASK(INT_SN_ACCESS) | \ | ||
231 | INT_MASK(INT_IDN_ACCESS) | \ | ||
232 | INT_MASK(INT_UDN_ACCESS) | \ | ||
233 | INT_MASK(INT_SWINT_3) | \ | ||
234 | INT_MASK(INT_SWINT_2) | \ | ||
235 | INT_MASK(INT_SWINT_1) | \ | ||
236 | INT_MASK(INT_SWINT_0) | \ | ||
237 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
238 | INT_MASK(INT_DTLB_MISS) | \ | ||
239 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
240 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
241 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
242 | INT_MASK(INT_I_ASID) | \ | ||
243 | INT_MASK(INT_D_ASID) | \ | ||
244 | INT_MASK(INT_DMA_ASID) | \ | ||
245 | INT_MASK(INT_SNI_ASID) | \ | ||
246 | INT_MASK(INT_DMA_CPL) | \ | ||
247 | INT_MASK(INT_SN_CPL) | \ | ||
248 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
249 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
250 | 0) | ||
251 | #define SYNC_INTERRUPTS ( \ | ||
252 | INT_MASK(INT_ITLB_MISS) | \ | ||
253 | INT_MASK(INT_ILL) | \ | ||
254 | INT_MASK(INT_GPV) | \ | ||
255 | INT_MASK(INT_SN_ACCESS) | \ | ||
256 | INT_MASK(INT_IDN_ACCESS) | \ | ||
257 | INT_MASK(INT_UDN_ACCESS) | \ | ||
258 | INT_MASK(INT_IDN_REFILL) | \ | ||
259 | INT_MASK(INT_UDN_REFILL) | \ | ||
260 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
261 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
262 | INT_MASK(INT_SWINT_3) | \ | ||
263 | INT_MASK(INT_SWINT_2) | \ | ||
264 | INT_MASK(INT_SWINT_1) | \ | ||
265 | INT_MASK(INT_SWINT_0) | \ | ||
266 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
267 | INT_MASK(INT_DTLB_MISS) | \ | ||
268 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
269 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
270 | 0) | ||
271 | #define NON_SYNC_INTERRUPTS ( \ | ||
272 | INT_MASK(INT_MEM_ERROR) | \ | ||
273 | INT_MASK(INT_DMATLB_MISS) | \ | ||
274 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
275 | INT_MASK(INT_SNITLB_MISS) | \ | ||
276 | INT_MASK(INT_SN_NOTIFY) | \ | ||
277 | INT_MASK(INT_SN_FIREWALL) | \ | ||
278 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
279 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
280 | INT_MASK(INT_TILE_TIMER) | \ | ||
281 | INT_MASK(INT_IDN_TIMER) | \ | ||
282 | INT_MASK(INT_UDN_TIMER) | \ | ||
283 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
284 | INT_MASK(INT_IDN_CA) | \ | ||
285 | INT_MASK(INT_UDN_CA) | \ | ||
286 | INT_MASK(INT_IDN_AVAIL) | \ | ||
287 | INT_MASK(INT_UDN_AVAIL) | \ | ||
288 | INT_MASK(INT_PERF_COUNT) | \ | ||
289 | INT_MASK(INT_INTCTRL_3) | \ | ||
290 | INT_MASK(INT_INTCTRL_2) | \ | ||
291 | INT_MASK(INT_INTCTRL_1) | \ | ||
292 | INT_MASK(INT_INTCTRL_0) | \ | ||
293 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
294 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
295 | INT_MASK(INT_I_ASID) | \ | ||
296 | INT_MASK(INT_D_ASID) | \ | ||
297 | INT_MASK(INT_DMA_ASID) | \ | ||
298 | INT_MASK(INT_SNI_ASID) | \ | ||
299 | INT_MASK(INT_DMA_CPL) | \ | ||
300 | INT_MASK(INT_SN_CPL) | \ | ||
301 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
302 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
303 | 0) | ||
304 | #endif /* !__ARCH_INTERRUPTS_H__ */ | ||
diff --git a/arch/tile/include/arch/sim_def.h b/arch/tile/include/arch/sim_def.h new file mode 100644 index 000000000000..6418fbde063e --- /dev/null +++ b/arch/tile/include/arch/sim_def.h | |||
@@ -0,0 +1,512 @@ | |||
1 | // Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
2 | // | ||
3 | // This program is free software; you can redistribute it and/or | ||
4 | // modify it under the terms of the GNU General Public License | ||
5 | // as published by the Free Software Foundation, version 2. | ||
6 | // | ||
7 | // This program is distributed in the hope that it will be useful, but | ||
8 | // WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | // MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
10 | // NON INFRINGEMENT. See the GNU General Public License for | ||
11 | // more details. | ||
12 | |||
13 | //! @file | ||
14 | //! | ||
15 | //! Some low-level simulator definitions. | ||
16 | //! | ||
17 | |||
18 | #ifndef __ARCH_SIM_DEF_H__ | ||
19 | #define __ARCH_SIM_DEF_H__ | ||
20 | |||
21 | |||
22 | //! Internal: the low bits of the SIM_CONTROL_* SPR values specify | ||
23 | //! the operation to perform, and the remaining bits are | ||
24 | //! an operation-specific parameter (often unused). | ||
25 | //! | ||
26 | #define _SIM_CONTROL_OPERATOR_BITS 8 | ||
27 | |||
28 | |||
29 | //== Values which can be written to SPR_SIM_CONTROL. | ||
30 | |||
31 | //! If written to SPR_SIM_CONTROL, stops profiling. | ||
32 | //! | ||
33 | #define SIM_CONTROL_PROFILER_DISABLE 0 | ||
34 | |||
35 | //! If written to SPR_SIM_CONTROL, starts profiling. | ||
36 | //! | ||
37 | #define SIM_CONTROL_PROFILER_ENABLE 1 | ||
38 | |||
39 | //! If written to SPR_SIM_CONTROL, clears profiling counters. | ||
40 | //! | ||
41 | #define SIM_CONTROL_PROFILER_CLEAR 2 | ||
42 | |||
43 | //! If written to SPR_SIM_CONTROL, checkpoints the simulator. | ||
44 | //! | ||
45 | #define SIM_CONTROL_CHECKPOINT 3 | ||
46 | |||
47 | //! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8), | ||
48 | //! sets the tracing mask to the given mask. See "sim_set_tracing()". | ||
49 | //! | ||
50 | #define SIM_CONTROL_SET_TRACING 4 | ||
51 | |||
52 | //! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8), | ||
53 | //! dumps the requested items of machine state to the log. | ||
54 | //! | ||
55 | #define SIM_CONTROL_DUMP 5 | ||
56 | |||
57 | //! If written to SPR_SIM_CONTROL, clears chip-level profiling counters. | ||
58 | //! | ||
59 | #define SIM_CONTROL_PROFILER_CHIP_CLEAR 6 | ||
60 | |||
61 | //! If written to SPR_SIM_CONTROL, disables chip-level profiling. | ||
62 | //! | ||
63 | #define SIM_CONTROL_PROFILER_CHIP_DISABLE 7 | ||
64 | |||
65 | //! If written to SPR_SIM_CONTROL, enables chip-level profiling. | ||
66 | //! | ||
67 | #define SIM_CONTROL_PROFILER_CHIP_ENABLE 8 | ||
68 | |||
 69 | //! If written to SPR_SIM_CONTROL, enables chip-level functional mode. | ||
70 | //! | ||
71 | #define SIM_CONTROL_ENABLE_FUNCTIONAL 9 | ||
72 | |||
73 | //! If written to SPR_SIM_CONTROL, disables chip-level functional mode. | ||
74 | //! | ||
75 | #define SIM_CONTROL_DISABLE_FUNCTIONAL 10 | ||
76 | |||
77 | //! If written to SPR_SIM_CONTROL, enables chip-level functional mode. | ||
78 | //! All tiles must perform this write for functional mode to be enabled. | ||
79 | //! Ignored in naked boot mode unless --functional is specified. | ||
80 | //! WARNING: Only the hypervisor startup code should use this! | ||
81 | //! | ||
82 | #define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11 | ||
83 | |||
84 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
85 | //! writes a string directly to the simulator output. Written to once for | ||
86 | //! each character in the string, plus a final NUL. Instead of NUL, | ||
87 | //! you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY". | ||
88 | //! | ||
89 | // ISSUE: Document the meaning of "newline", and the handling of NUL. | ||
90 | // | ||
91 | #define SIM_CONTROL_PUTC 12 | ||
92 | |||
93 | //! If written to SPR_SIM_CONTROL, clears the --grind-coherence state for | ||
94 | //! this core. This is intended to be used before a loop that will | ||
95 | //! invalidate the cache by loading new data and evicting all current data. | ||
96 | //! Generally speaking, this API should only be used by system code. | ||
97 | //! | ||
98 | #define SIM_CONTROL_GRINDER_CLEAR 13 | ||
99 | |||
100 | //! If written to SPR_SIM_CONTROL, shuts down the simulator. | ||
101 | //! | ||
102 | #define SIM_CONTROL_SHUTDOWN 14 | ||
103 | |||
104 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
105 | //! indicates that a fork syscall just created the given process. | ||
106 | //! | ||
107 | #define SIM_CONTROL_OS_FORK 15 | ||
108 | |||
109 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
110 | //! indicates that an exit syscall was just executed by the given process. | ||
111 | //! | ||
112 | #define SIM_CONTROL_OS_EXIT 16 | ||
113 | |||
114 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
115 | //! indicates that the OS just switched to the given process. | ||
116 | //! | ||
117 | #define SIM_CONTROL_OS_SWITCH 17 | ||
118 | |||
119 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
120 | //! indicates that an exec syscall was just executed. Written to once for | ||
121 | //! each character in the executable name, plus a final NUL. | ||
122 | //! | ||
123 | #define SIM_CONTROL_OS_EXEC 18 | ||
124 | |||
125 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
126 | //! indicates that an interpreter (PT_INTERP) was loaded. Written to once | ||
127 | //! for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a | ||
128 | //! hex load address starting with "0x", and "PATH" is the executable name. | ||
129 | //! | ||
130 | #define SIM_CONTROL_OS_INTERP 19 | ||
131 | |||
132 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
133 | //! indicates that a dll was loaded. Written to once for each character | ||
134 | //! in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load | ||
135 | //! address starting with "0x", and "PATH" is the executable name. | ||
136 | //! | ||
137 | #define SIM_CONTROL_DLOPEN 20 | ||
138 | |||
139 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
140 | //! indicates that a dll was unloaded. Written to once for each character | ||
141 | //! in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load | ||
142 | //! address starting with "0x". | ||
143 | //! | ||
144 | #define SIM_CONTROL_DLCLOSE 21 | ||
145 | |||
146 | //! If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8), | ||
147 | //! indicates whether to allow data reads to remotely-cached | ||
148 | //! dirty cache lines to be cached locally without grinder warnings or | ||
149 | //! assertions (used by Linux kernel fast memcpy). | ||
150 | //! | ||
151 | #define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22 | ||
152 | |||
153 | //! If written to SPR_SIM_CONTROL, enables memory tracing. | ||
154 | //! | ||
155 | #define SIM_CONTROL_ENABLE_MEM_LOGGING 23 | ||
156 | |||
157 | //! If written to SPR_SIM_CONTROL, disables memory tracing. | ||
158 | //! | ||
159 | #define SIM_CONTROL_DISABLE_MEM_LOGGING 24 | ||
160 | |||
161 | //! If written to SPR_SIM_CONTROL, changes the shaping parameters of one of | ||
162 | //! the gbe or xgbe shims. Must specify the shim id, the type, the units, and | ||
163 | //! the rate, as defined in SIM_SHAPING_SPR_ARG. | ||
164 | //! | ||
165 | #define SIM_CONTROL_SHAPING 25 | ||
166 | |||
167 | //! If written to SPR_SIM_CONTROL, combined with character (shifted by 8), | ||
168 | //! requests that a simulator command be executed. Written to once for each | ||
169 | //! character in the command, plus a final NUL. | ||
170 | //! | ||
171 | #define SIM_CONTROL_COMMAND 26 | ||
172 | |||
173 | //! If written to SPR_SIM_CONTROL, indicates that the simulated system | ||
174 | //! is panicking, to allow debugging via --debug-on-panic. | ||
175 | //! | ||
176 | #define SIM_CONTROL_PANIC 27 | ||
177 | |||
178 | //! If written to SPR_SIM_CONTROL, triggers a simulator syscall. | ||
179 | //! See "sim_syscall()" for more info. | ||
180 | //! | ||
181 | #define SIM_CONTROL_SYSCALL 32 | ||
182 | |||
183 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
184 | //! provides the pid that subsequent SIM_CONTROL_OS_FORK writes should | ||
185 | //! use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH. | ||
186 | //! | ||
187 | #define SIM_CONTROL_OS_FORK_PARENT 33 | ||
188 | |||
189 | //! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number | ||
190 | //! (shifted by 8), clears the pending magic data section. The cleared | ||
191 | //! pending magic data section and any subsequently appended magic bytes | ||
192 | //! will only take effect when the classifier blast programmer is run. | ||
193 | #define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34 | ||
194 | |||
195 | //! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number | ||
196 | //! (shifted by 8) and a byte of data (shifted by 16), appends that byte | ||
197 | //! to the shim's pending magic data section. The pending magic data | ||
198 | //! section takes effect when the classifier blast programmer is run. | ||
199 | #define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35 | ||
200 | |||
201 | //! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number | ||
202 | //! (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a | ||
203 | //! mask of links (shifted by 32), enable or disable the corresponding | ||
204 | //! mPIPE links. | ||
205 | #define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36 | ||
206 | |||
207 | //== Syscall numbers for use with "sim_syscall()". | ||
208 | |||
209 | //! Syscall number for sim_add_watchpoint(). | ||
210 | //! | ||
211 | #define SIM_SYSCALL_ADD_WATCHPOINT 2 | ||
212 | |||
213 | //! Syscall number for sim_remove_watchpoint(). | ||
214 | //! | ||
215 | #define SIM_SYSCALL_REMOVE_WATCHPOINT 3 | ||
216 | |||
217 | //! Syscall number for sim_query_watchpoint(). | ||
218 | //! | ||
219 | #define SIM_SYSCALL_QUERY_WATCHPOINT 4 | ||
220 | |||
221 | //! Syscall number that asserts that the cache lines whose 64-bit PA | ||
222 | //! is passed as the second argument to sim_syscall(), and over a | ||
223 | //! range passed as the third argument, are no longer in cache. | ||
224 | //! The simulator raises an error if this is not the case. | ||
225 | //! | ||
226 | #define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5 | ||
227 | |||
228 | |||
229 | //== Bit masks which can be shifted by 8, combined with | ||
230 | //== SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL. | ||
231 | |||
232 | //! @addtogroup arch_sim | ||
233 | //! @{ | ||
234 | |||
235 | //! Enable --trace-cycle when passed to simulator_set_tracing(). | ||
236 | //! | ||
237 | #define SIM_TRACE_CYCLES 0x01 | ||
238 | |||
239 | //! Enable --trace-router when passed to simulator_set_tracing(). | ||
240 | //! | ||
241 | #define SIM_TRACE_ROUTER 0x02 | ||
242 | |||
243 | //! Enable --trace-register-writes when passed to simulator_set_tracing(). | ||
244 | //! | ||
245 | #define SIM_TRACE_REGISTER_WRITES 0x04 | ||
246 | |||
247 | //! Enable --trace-disasm when passed to simulator_set_tracing(). | ||
248 | //! | ||
249 | #define SIM_TRACE_DISASM 0x08 | ||
250 | |||
251 | //! Enable --trace-stall-info when passed to simulator_set_tracing(). | ||
252 | //! | ||
253 | #define SIM_TRACE_STALL_INFO 0x10 | ||
254 | |||
255 | //! Enable --trace-memory-controller when passed to simulator_set_tracing(). | ||
256 | //! | ||
257 | #define SIM_TRACE_MEMORY_CONTROLLER 0x20 | ||
258 | |||
259 | //! Enable --trace-l2 when passed to simulator_set_tracing(). | ||
260 | //! | ||
261 | #define SIM_TRACE_L2_CACHE 0x40 | ||
262 | |||
263 | //! Enable --trace-lines when passed to simulator_set_tracing(). | ||
264 | //! | ||
265 | #define SIM_TRACE_LINES 0x80 | ||
266 | |||
267 | //! Turn off all tracing when passed to simulator_set_tracing(). | ||
268 | //! | ||
269 | #define SIM_TRACE_NONE 0 | ||
270 | |||
271 | //! Turn on all tracing when passed to simulator_set_tracing(). | ||
272 | //! | ||
273 | #define SIM_TRACE_ALL (-1) | ||
274 | |||
275 | //! @} | ||
276 | |||
277 | //! Computes the value to write to SPR_SIM_CONTROL to set tracing flags. | ||
278 | //! | ||
279 | #define SIM_TRACE_SPR_ARG(mask) \ | ||
280 | (SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
281 | |||
282 | |||
283 | //== Bit masks which can be shifted by 8, combined with | ||
284 | //== SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL. | ||
285 | |||
286 | //! @addtogroup arch_sim | ||
287 | //! @{ | ||
288 | |||
289 | //! Dump the general-purpose registers. | ||
290 | //! | ||
291 | #define SIM_DUMP_REGS 0x001 | ||
292 | |||
293 | //! Dump the SPRs. | ||
294 | //! | ||
295 | #define SIM_DUMP_SPRS 0x002 | ||
296 | |||
297 | //! Dump the ITLB. | ||
298 | //! | ||
299 | #define SIM_DUMP_ITLB 0x004 | ||
300 | |||
301 | //! Dump the DTLB. | ||
302 | //! | ||
303 | #define SIM_DUMP_DTLB 0x008 | ||
304 | |||
305 | //! Dump the L1 I-cache. | ||
306 | //! | ||
307 | #define SIM_DUMP_L1I 0x010 | ||
308 | |||
309 | //! Dump the L1 D-cache. | ||
310 | //! | ||
311 | #define SIM_DUMP_L1D 0x020 | ||
312 | |||
313 | //! Dump the L2 cache. | ||
314 | //! | ||
315 | #define SIM_DUMP_L2 0x040 | ||
316 | |||
317 | //! Dump the switch registers. | ||
318 | //! | ||
319 | #define SIM_DUMP_SNREGS 0x080 | ||
320 | |||
321 | //! Dump the switch ITLB. | ||
322 | //! | ||
323 | #define SIM_DUMP_SNITLB 0x100 | ||
324 | |||
325 | //! Dump the switch L1 I-cache. | ||
326 | //! | ||
327 | #define SIM_DUMP_SNL1I 0x200 | ||
328 | |||
329 | //! Dump the current backtrace. | ||
330 | //! | ||
331 | #define SIM_DUMP_BACKTRACE 0x400 | ||
332 | |||
333 | //! Only dump valid lines in caches. | ||
334 | //! | ||
335 | #define SIM_DUMP_VALID_LINES 0x800 | ||
336 | |||
337 | //! Dump everything that is dumpable. | ||
338 | //! | ||
339 | #define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES) | ||
340 | |||
341 | //! @} | ||
342 | |||
343 | //! Computes the value to write to SPR_SIM_CONTROL to dump machine state. | ||
344 | //! | ||
345 | #define SIM_DUMP_SPR_ARG(mask) \ | ||
346 | (SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
347 | |||
348 | |||
349 | //== Bit masks which can be shifted by 8, combined with | ||
350 | //== SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL. | ||
351 | |||
352 | //! @addtogroup arch_sim | ||
353 | //! @{ | ||
354 | |||
355 | //! Use with SIM_PROFILER_CHIP_xxx to control the memory controllers. | ||
356 | //! | ||
357 | #define SIM_CHIP_MEMCTL 0x001 | ||
358 | |||
359 | //! Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface. | ||
360 | //! | ||
361 | #define SIM_CHIP_XAUI 0x002 | ||
362 | |||
363 | //! Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface. | ||
364 | //! | ||
365 | #define SIM_CHIP_PCIE 0x004 | ||
366 | |||
367 | //! Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. | ||
368 | //! | ||
369 | #define SIM_CHIP_MPIPE 0x008 | ||
370 | |||
371 | //! Reference all chip devices. | ||
372 | //! | ||
373 | #define SIM_CHIP_ALL (-1) | ||
374 | |||
375 | //! @} | ||
376 | |||
377 | //! Computes the value to write to SPR_SIM_CONTROL to clear chip statistics. | ||
378 | //! | ||
379 | #define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \ | ||
380 | (SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
381 | |||
382 | //! Computes the value to write to SPR_SIM_CONTROL to disable chip statistics. | ||
383 | //! | ||
384 | #define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \ | ||
385 | (SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
386 | |||
387 | //! Computes the value to write to SPR_SIM_CONTROL to enable chip statistics. | ||
388 | //! | ||
389 | #define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \ | ||
390 | (SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
391 | |||
392 | |||
393 | |||
394 | // Shim bitrate controls. | ||
395 | |||
396 | //! The number of bits used to store the shim id. | ||
397 | //! | ||
398 | #define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3 | ||
399 | |||
400 | //! @addtogroup arch_sim | ||
401 | //! @{ | ||
402 | |||
403 | //! Change the gbe 0 bitrate. | ||
404 | //! | ||
405 | #define SIM_CONTROL_SHAPING_GBE_0 0x0 | ||
406 | |||
407 | //! Change the gbe 1 bitrate. | ||
408 | //! | ||
409 | #define SIM_CONTROL_SHAPING_GBE_1 0x1 | ||
410 | |||
411 | //! Change the gbe 2 bitrate. | ||
412 | //! | ||
413 | #define SIM_CONTROL_SHAPING_GBE_2 0x2 | ||
414 | |||
415 | //! Change the gbe 3 bitrate. | ||
416 | //! | ||
417 | #define SIM_CONTROL_SHAPING_GBE_3 0x3 | ||
418 | |||
419 | //! Change the xgbe 0 bitrate. | ||
420 | //! | ||
421 | #define SIM_CONTROL_SHAPING_XGBE_0 0x4 | ||
422 | |||
423 | //! Change the xgbe 1 bitrate. | ||
424 | //! | ||
425 | #define SIM_CONTROL_SHAPING_XGBE_1 0x5 | ||
426 | |||
427 | //! The type of shaping to do. | ||
428 | //! | ||
429 | #define SIM_CONTROL_SHAPING_TYPE_BITS 2 | ||
430 | |||
431 | //! Control the multiplier. | ||
432 | //! | ||
433 | #define SIM_CONTROL_SHAPING_MULTIPLIER 0 | ||
434 | |||
435 | //! Control the PPS. | ||
436 | //! | ||
437 | #define SIM_CONTROL_SHAPING_PPS 1 | ||
438 | |||
439 | //! Control the BPS. | ||
440 | //! | ||
441 | #define SIM_CONTROL_SHAPING_BPS 2 | ||
442 | |||
443 | //! The number of bits for the units for the shaping parameter. | ||
444 | //! | ||
445 | #define SIM_CONTROL_SHAPING_UNITS_BITS 2 | ||
446 | |||
447 | //! Provide a number in single units. | ||
448 | //! | ||
449 | #define SIM_CONTROL_SHAPING_UNITS_SINGLE 0 | ||
450 | |||
451 | //! Provide a number in kilo units. | ||
452 | //! | ||
453 | #define SIM_CONTROL_SHAPING_UNITS_KILO 1 | ||
454 | |||
455 | //! Provide a number in mega units. | ||
456 | //! | ||
457 | #define SIM_CONTROL_SHAPING_UNITS_MEGA 2 | ||
458 | |||
459 | //! Provide a number in giga units. | ||
460 | //! | ||
461 | #define SIM_CONTROL_SHAPING_UNITS_GIGA 3 | ||
462 | |||
463 | //! @} | ||
464 | |||
465 | //! How many bits are available for the rate. | ||
466 | //! | ||
467 | #define SIM_CONTROL_SHAPING_RATE_BITS \ | ||
468 | (32 - (_SIM_CONTROL_OPERATOR_BITS + \ | ||
469 | SIM_CONTROL_SHAPING_SHIM_ID_BITS + \ | ||
470 | SIM_CONTROL_SHAPING_TYPE_BITS + \ | ||
471 | SIM_CONTROL_SHAPING_UNITS_BITS)) | ||
472 | |||
473 | //! Computes the value to write to SPR_SIM_CONTROL to change a bitrate. | ||
474 | //! | ||
475 | #define SIM_SHAPING_SPR_ARG(shim, type, units, rate) \ | ||
476 | (SIM_CONTROL_SHAPING | \ | ||
477 | ((shim) | \ | ||
478 | ((type) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS)) | \ | ||
479 | ((units) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS + \ | ||
480 | SIM_CONTROL_SHAPING_TYPE_BITS)) | \ | ||
481 | ((rate) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS + \ | ||
482 | SIM_CONTROL_SHAPING_TYPE_BITS + \ | ||
483 | SIM_CONTROL_SHAPING_UNITS_BITS))) << _SIM_CONTROL_OPERATOR_BITS) | ||
484 | |||
485 | |||
486 | //== Values returned when reading SPR_SIM_CONTROL. | ||
487 | // ISSUE: These names should share a longer common prefix. | ||
488 | |||
489 | //! When reading SPR_SIM_CONTROL, the mask of simulator tracing bits | ||
490 | //! (SIM_TRACE_xxx values). | ||
491 | //! | ||
492 | #define SIM_TRACE_FLAG_MASK 0xFFFF | ||
493 | |||
494 | //! When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled. | ||
495 | //! | ||
496 | #define SIM_PROFILER_ENABLED_MASK 0x10000 | ||
497 | |||
498 | |||
499 | //== Special arguments for "SIM_CONTROL_PUTC". | ||
500 | |||
501 | //! Flag value for forcing a PUTC string-flush, including | ||
502 | //! coordinate/cycle prefix and newline. | ||
503 | //! | ||
504 | #define SIM_PUTC_FLUSH_STRING 0x100 | ||
505 | |||
506 | //! Flag value for forcing a PUTC binary-data-flush, which skips the | ||
507 | //! prefix and does not append a newline. | ||
508 | //! | ||
509 | #define SIM_PUTC_FLUSH_BINARY 0x101 | ||
510 | |||
511 | |||
512 | #endif //__ARCH_SIM_DEF_H__ | ||
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h new file mode 100644 index 000000000000..c8fdbd9a45e6 --- /dev/null +++ b/arch/tile/include/arch/spr_def.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
/*
 * Select the SPR definitions that match the compilation target:
 * __tilegx__ is predefined by the compiler when building for the
 * 64-bit TILE-Gx chip; otherwise use the 32-bit definitions.
 */
#ifdef __tilegx__
#include <arch/spr_def_64.h>
#else
#include <arch/spr_def_32.h>
#endif
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h new file mode 100644 index 000000000000..b4fc06864df6 --- /dev/null +++ b/arch/tile/include/arch/spr_def_32.h | |||
@@ -0,0 +1,162 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
#ifndef __DOXYGEN__

#ifndef __ARCH_SPR_DEF_H__
#define __ARCH_SPR_DEF_H__

/*
 * Special-purpose register (SPR) numbers and bitfield masks for the
 * 32-bit tile chip.  Double-underscore names (FOO__BAR_MASK/_SHIFT/
 * _RMASK) describe fields within the corresponding SPR; all other
 * names are the SPR encodings themselves.
 */

/* Performance counters. */
#define SPR_AUX_PERF_COUNT_0 0x6005
#define SPR_AUX_PERF_COUNT_1 0x6006
#define SPR_AUX_PERF_COUNT_CTL 0x6007
#define SPR_AUX_PERF_COUNT_STS 0x6008

/* Free-running cycle counter (read high word first; see cycles code). */
#define SPR_CYCLE_HIGH 0x4e06
#define SPR_CYCLE_LOW 0x4e07

/* Tile DMA engine registers. */
#define SPR_DMA_BYTE 0x3900
#define SPR_DMA_CHUNK_SIZE 0x3901
#define SPR_DMA_CTR 0x3902
#define SPR_DMA_CTR__REQUEST_MASK 0x1
#define SPR_DMA_CTR__SUSPEND_MASK 0x2
#define SPR_DMA_DST_ADDR 0x3903
#define SPR_DMA_DST_CHUNK_ADDR 0x3904
#define SPR_DMA_SRC_ADDR 0x3905
#define SPR_DMA_SRC_CHUNK_ADDR 0x3906
#define SPR_DMA_STATUS__DONE_MASK 0x1
#define SPR_DMA_STATUS__BUSY_MASK 0x2
#define SPR_DMA_STATUS__RUNNING_MASK 0x10
#define SPR_DMA_STRIDE 0x3907
#define SPR_DMA_USER_STATUS 0x3908

/* Simulator / test support. */
#define SPR_DONE 0x4e08
#define SPR_EVENT_BEGIN 0x4e0d
#define SPR_EVENT_END 0x4e0e

/* Exception context for protection levels 0 and 1 (PC plus PL/ICS bits). */
#define SPR_EX_CONTEXT_0_0 0x4a05
#define SPR_EX_CONTEXT_0_1 0x4a06
#define SPR_EX_CONTEXT_0_1__PL_SHIFT 0
#define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3
#define SPR_EX_CONTEXT_0_1__PL_MASK 0x3
#define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2
#define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4
#define SPR_EX_CONTEXT_1_0 0x4805
#define SPR_EX_CONTEXT_1_1 0x4806
#define SPR_EX_CONTEXT_1_1__PL_SHIFT 0
#define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3
#define SPR_EX_CONTEXT_1_1__PL_MASK 0x3
#define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2
#define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1
#define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4
#define SPR_FAIL 0x4e09

/* Interrupt control and masking, per protection level. */
#define SPR_INTCTRL_0_STATUS 0x4a07
#define SPR_INTCTRL_1_STATUS 0x4807
#define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a
#define SPR_INTERRUPT_MASK_0_0 0x4a08
#define SPR_INTERRUPT_MASK_0_1 0x4a09
#define SPR_INTERRUPT_MASK_1_0 0x4809
#define SPR_INTERRUPT_MASK_1_1 0x480a
#define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a
#define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b
#define SPR_INTERRUPT_MASK_RESET_1_0 0x480b
#define SPR_INTERRUPT_MASK_RESET_1_1 0x480c
#define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c
#define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d
#define SPR_INTERRUPT_MASK_SET_1_0 0x480d
#define SPR_INTERRUPT_MASK_SET_1_1 0x480e

/* MPL (minimum protection level) controls for each interrupt class. */
#define SPR_MPL_DMA_CPL_SET_0 0x5800
#define SPR_MPL_DMA_CPL_SET_1 0x5801
#define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
#define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
#define SPR_MPL_INTCTRL_0_SET_0 0x4a00
#define SPR_MPL_INTCTRL_0_SET_1 0x4a01
#define SPR_MPL_INTCTRL_1_SET_0 0x4800
#define SPR_MPL_INTCTRL_1_SET_1 0x4801
#define SPR_MPL_SN_ACCESS_SET_0 0x0800
#define SPR_MPL_SN_ACCESS_SET_1 0x0801
#define SPR_MPL_SN_CPL_SET_0 0x5a00
#define SPR_MPL_SN_CPL_SET_1 0x5a01
#define SPR_MPL_SN_FIREWALL_SET_0 0x2c00
#define SPR_MPL_SN_FIREWALL_SET_1 0x2c01
#define SPR_MPL_SN_NOTIFY_SET_0 0x2a00
#define SPR_MPL_SN_NOTIFY_SET_1 0x2a01
#define SPR_MPL_UDN_ACCESS_SET_0 0x0c00
#define SPR_MPL_UDN_ACCESS_SET_1 0x0c01
#define SPR_MPL_UDN_AVAIL_SET_0 0x4000
#define SPR_MPL_UDN_AVAIL_SET_1 0x4001
#define SPR_MPL_UDN_CA_SET_0 0x3c00
#define SPR_MPL_UDN_CA_SET_1 0x3c01
#define SPR_MPL_UDN_COMPLETE_SET_0 0x1400
#define SPR_MPL_UDN_COMPLETE_SET_1 0x1401
#define SPR_MPL_UDN_FIREWALL_SET_0 0x3000
#define SPR_MPL_UDN_FIREWALL_SET_1 0x3001
#define SPR_MPL_UDN_REFILL_SET_0 0x1000
#define SPR_MPL_UDN_REFILL_SET_1 0x1001
#define SPR_MPL_UDN_TIMER_SET_0 0x3600
#define SPR_MPL_UDN_TIMER_SET_1 0x3601
#define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00
#define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01
#define SPR_PASS 0x4e0b

/* Main performance counters and processor status. */
#define SPR_PERF_COUNT_0 0x4205
#define SPR_PERF_COUNT_1 0x4206
#define SPR_PERF_COUNT_CTL 0x4207
#define SPR_PERF_COUNT_STS 0x4208
#define SPR_PROC_STATUS 0x4f00
#define SPR_SIM_CONTROL 0x4e0c

/* Static network (SN) registers. */
#define SPR_SNCTL 0x0805
#define SPR_SNCTL__FRZFABRIC_MASK 0x1
#define SPR_SNCTL__FRZPROC_MASK 0x2
#define SPR_SNPC 0x080b
#define SPR_SNSTATIC 0x080c

/* Per-PL scratch save registers used by the interrupt entry code. */
#define SPR_SYSTEM_SAVE_0_0 0x4b00
#define SPR_SYSTEM_SAVE_0_1 0x4b01
#define SPR_SYSTEM_SAVE_0_2 0x4b02
#define SPR_SYSTEM_SAVE_0_3 0x4b03
#define SPR_SYSTEM_SAVE_1_0 0x4900
#define SPR_SYSTEM_SAVE_1_1 0x4901
#define SPR_SYSTEM_SAVE_1_2 0x4902
#define SPR_SYSTEM_SAVE_1_3 0x4903

/* Tile identity / timer / write-pending state. */
#define SPR_TILE_COORD 0x4c17
#define SPR_TILE_RTF_HWM 0x4e10
#define SPR_TILE_TIMER_CONTROL 0x3205
#define SPR_TILE_WRITE_PENDING 0x4e0f

/* User dynamic network (UDN) registers. */
#define SPR_UDN_AVAIL_EN 0x4005
#define SPR_UDN_CA_DATA 0x0d00
#define SPR_UDN_DATA_AVAIL 0x0d03
#define SPR_UDN_DEADLOCK_TIMEOUT 0x3606
#define SPR_UDN_DEMUX_CA_COUNT 0x0c05
#define SPR_UDN_DEMUX_COUNT_0 0x0c06
#define SPR_UDN_DEMUX_COUNT_1 0x0c07
#define SPR_UDN_DEMUX_COUNT_2 0x0c08
#define SPR_UDN_DEMUX_COUNT_3 0x0c09
#define SPR_UDN_DEMUX_CTL 0x0c0a
#define SPR_UDN_DEMUX_QUEUE_SEL 0x0c0c
#define SPR_UDN_DEMUX_STATUS 0x0c0d
#define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e
#define SPR_UDN_DIRECTION_PROTECT 0x3005
#define SPR_UDN_REFILL_EN 0x1005
#define SPR_UDN_SP_FIFO_DATA 0x0c11
#define SPR_UDN_SP_FIFO_SEL 0x0c12
#define SPR_UDN_SP_FREEZE 0x0c13
#define SPR_UDN_SP_FREEZE__SP_FRZ_MASK 0x1
#define SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK 0x2
#define SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK 0x4
#define SPR_UDN_SP_STATE 0x0c14
#define SPR_UDN_TAG_0 0x0c15
#define SPR_UDN_TAG_1 0x0c16
#define SPR_UDN_TAG_2 0x0c17
#define SPR_UDN_TAG_3 0x0c18
#define SPR_UDN_TAG_VALID 0x0c19
#define SPR_UDN_TILE_COORD 0x0c1a

#endif /* !defined(__ARCH_SPR_DEF_H__) */

#endif /* !defined(__DOXYGEN__) */
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild new file mode 100644 index 000000000000..3b8f55b82dee --- /dev/null +++ b/arch/tile/include/asm/Kbuild | |||
@@ -0,0 +1,3 @@ | |||
# Export the generic asm headers, plus the tile-specific ucontext.h.
include include/asm-generic/Kbuild.asm

header-y += ucontext.h
diff --git a/arch/tile/include/asm/asm-offsets.h b/arch/tile/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/tile/include/asm/asm-offsets.h | |||
@@ -0,0 +1 @@ | |||
#include <generated/asm-offsets.h> | |||
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h new file mode 100644 index 000000000000..b8c49f98a44c --- /dev/null +++ b/arch/tile/include/asm/atomic.h | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Atomic primitives. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_ATOMIC_H | ||
18 | #define _ASM_TILE_ATOMIC_H | ||
19 | |||
20 | #ifndef __ASSEMBLY__ | ||
21 | |||
22 | #include <linux/compiler.h> | ||
23 | #include <asm/system.h> | ||
24 | |||
25 | #define ATOMIC_INIT(i) { (i) } | ||
26 | |||
27 | /** | ||
28 | * atomic_read - read atomic variable | ||
29 | * @v: pointer of type atomic_t | ||
30 | * | ||
31 | * Atomically reads the value of @v. | ||
32 | */ | ||
33 | static inline int atomic_read(const atomic_t *v) | ||
34 | { | ||
35 | return v->counter; | ||
36 | } | ||
37 | |||
/**
 * atomic_sub_return - subtract integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to subtract
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 * The (int) cast keeps the negation well-defined if @i is unsigned.
 */
#define atomic_sub_return(i, v)		atomic_add_return((int)(-(i)), (v))

/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
#define atomic_sub(i, v)		atomic_add((int)(-(i)), (v))

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns true if the result is
 * zero, or false for all other cases.
 */
#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)

/**
 * atomic_inc_return - increment memory and return
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v)		atomic_add_return(1, (v))

/**
 * atomic_dec_return - decrement memory and return
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v)		atomic_sub_return(1, (v))

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v)			atomic_add(1, (v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v)			atomic_sub(1, (v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns true if the result is 0.
 */
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is 0.
 */
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true if the result is
 * negative, or false when result is greater than or equal to zero.
 */
#define atomic_add_negative(i, v)	(atomic_add_return((i), (v)) < 0)

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)


/*
 * We define xchg() and cmpxchg() in the included headers.
 * Note that we do not define __HAVE_ARCH_CMPXCHG, since that would imply
 * that cmpxchg() is an efficient operation, which is not particularly true.
 */

/*
 * Nonexistent functions intended to cause link errors when xchg()/cmpxchg()
 * are applied to an object of unsupported size.
 */
extern unsigned long __xchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_called_with_bad_pointer(void);

/* Historical "test and set" alias: exchange the value 1 into *ptr. */
#define tas(ptr) (xchg((ptr), 1))

#endif /* __ASSEMBLY__ */

/* Pull in the width-specific primitives (atomic_add_return() etc.). */
#ifndef __tilegx__
#include <asm/atomic_32.h>
#else
#include <asm/atomic_64.h>
#endif

/* Provide the appropriate atomic_long_t definitions. */
#ifndef __ASSEMBLY__
#include <asm-generic/atomic-long.h>
#endif

#endif /* _ASM_TILE_ATOMIC_H */
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h new file mode 100644 index 000000000000..40a5a3a876d9 --- /dev/null +++ b/arch/tile/include/asm/atomic_32.h | |||
@@ -0,0 +1,370 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Do not include directly; use <asm/atomic.h>. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_ATOMIC_32_H | ||
18 | #define _ASM_TILE_ATOMIC_32_H | ||
19 | |||
20 | #include <arch/chip.h> | ||
21 | |||
22 | #ifndef __ASSEMBLY__ | ||
23 | |||
/*
 * Tile-specific routines to support <asm/atomic.h>.
 * These are the out-of-line primitives the inline wrappers below are
 * built on; presumably implemented in arch lib code -- see the
 * "Private helper routines" declarations near the end of this file.
 */
int _atomic_xchg(atomic_t *v, int n);
int _atomic_xchg_add(atomic_t *v, int i);
int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
int _atomic_cmpxchg(atomic_t *v, int o, int n);

/**
 * atomic_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns old @v.
 */
static inline int atomic_xchg(atomic_t *v, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg(v, n);
}

/**
 * atomic_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_cmpxchg(v, o, n);
}
57 | |||
/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.  No barrier here, unlike the
 * value-returning operations below.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(v, i);
}

/**
 * atomic_add_return - add integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(v, i) + i;
}

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(v, a, u) != u;
}
97 | |||
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(v, n);
}

/*
 * Generic xchg()/cmpxchg(): only 4-byte (atomic_t-sized) objects are
 * supported; any other size resolves to the deliberately-undefined
 * functions declared in <asm/atomic.h> and fails at link time.
 */
#define xchg(ptr, x) ((typeof(*(ptr))) \
	((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
	 atomic_xchg((atomic_t *)(ptr), (long)(x)) : \
	 __xchg_called_with_bad_pointer()))

#define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \
	((sizeof(*(ptr)) == sizeof(atomic_t)) ? \
	 atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \
	 __cmpxchg_called_with_bad_pointer()))
122 | |||
/* A 64bit atomic type */

typedef struct {
	u64 __aligned(8) counter;	/* 8-byte alignment required by the asm helpers */
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/* Out-of-line 64-bit primitives, analogous to the 32-bit _atomic_xxx(). */
u64 _atomic64_xchg(atomic64_t *v, u64 n);
u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);

/**
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
static inline u64 atomic64_read(const atomic64_t *v)
{
	/*
	 * Requires an atomic op to read both 32-bit parts consistently.
	 * Casting away const is safe since the atomic support routines
	 * do not write to memory if the value has not been modified.
	 */
	return _atomic64_xchg_add((atomic64_t *)v, 0);
}

/**
 * atomic64_xchg - atomically exchange contents of memory with a new value
 * @v: pointer of type atomic64_t
 * @n: integer value to store in memory
 *
 * Atomically sets @v to @n and returns old @v.
 */
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg(v, n);
}

/**
 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
 * @v: pointer of type atomic64_t
 * @o: old value that memory should have
 * @n: new value to write to memory if it matches
 *
 * Atomically checks if @v holds @o and replaces it with @n if so.
 * Returns the old value at @v.
 */
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_cmpxchg(v, o, n);
}

/**
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.  No barrier, like atomic_add().
 */
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	_atomic64_xchg_add(v, i);
}

/**
 * atomic64_add_return - add integer and return
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(v, i) + i;
}

/**
 * atomic64_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(v, a, u) != u;
}
219 | |||
/**
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @n: required value
 *
 * Atomically sets the value of @v to @n.
 *
 * atomic64_set() can't be just a raw store, since it would be lost if it
 * fell between the load and store of one of the other atomic ops.
 */
static inline void atomic64_set(atomic64_t *v, u64 n)
{
	_atomic64_xchg(v, n);
}
234 | |||
/*
 * Derived atomic64 operations, built on the primitives above.
 *
 * The primitives return u64, and an unsigned value compared "< 0" is
 * always false; cast to s64 in atomic64_add_negative() so the test
 * really examines the sign bit of the result.
 */
#define atomic64_add_negative(a, v)	((s64)atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
246 | |||
/*
 * We need to barrier before modifying the word, since the _atomic_xxx()
 * routines just tns the lock and then read/modify/write of the word.
 * But after the word is updated, the routine issues an "mf" before returning,
 * and since it's a function call, we don't even need a compiler barrier.
 * Hence the __before variants are full barriers and the __after variants
 * are no-ops.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_dec()	do { } while (0)
#define smp_mb__after_atomic_inc()	do { } while (0)
257 | |||
258 | |||
/*
 * Support "tns" atomic integers.  These are atomic integers that can
 * hold any value but "1".  They are more efficient than regular atomic
 * operations because the "lock" (aka acquire) step is a single "tns"
 * in the uncontended case, and the "unlock" (aka release) step is a
 * single "store" without an mf.  (However, note that on tilepro the
 * "tns" will evict the local cache line, so it's not all upside.)
 *
 * Note that you can ONLY observe the value stored in the pointer
 * using these operations; a direct read of the value may confusingly
 * return the special value "1".
 */

/* Acquire @p (returning its previous value) / release it with value @v. */
int __tns_atomic_acquire(atomic_t *);
void __tns_atomic_release(atomic_t *p, int v);

/* Atomically store @i (which must not be 1) into the tns-atomic @v. */
static inline void tns_atomic_set(atomic_t *v, int i)
{
	__tns_atomic_acquire(v);
	__tns_atomic_release(v, i);
}
280 | |||
281 | static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n) | ||
282 | { | ||
283 | int ret = __tns_atomic_acquire(v); | ||
284 | __tns_atomic_release(v, (ret == o) ? n : ret); | ||
285 | return ret; | ||
286 | } | ||
287 | |||
288 | static inline int tns_atomic_xchg(atomic_t *v, int n) | ||
289 | { | ||
290 | int ret = __tns_atomic_acquire(v); | ||
291 | __tns_atomic_release(v, n); | ||
292 | return ret; | ||
293 | } | ||
294 | |||
295 | #endif /* !__ASSEMBLY__ */ | ||
296 | |||
/*
 * Internal definitions only beyond this point.
 */

/*
 * Whether the per-cpu lock hash is reached through a table of pointers
 * (older chips without coherent L3 hash-for-home, SMP only) or is a
 * single flat array (everything else).
 */
#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
  (!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))

#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/* Number of entries in atomic_lock_ptr[]. */
#define ATOMIC_HASH_L1_SHIFT 6
#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)

/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * Number of atomic locks in atomic_locks[].  Must be a power of two.
 * There is no reason for more than PAGE_SIZE / 8 entries, since that
 * is the maximum number of pointer bits we can use to index this.
 * And we cannot have more than PAGE_SIZE / 4, since this has to
 * fit on a single page and each entry takes 4 bytes.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/*
 * All the code that may fault while holding an atomic lock must
 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
 * can correctly release and reacquire the lock.  Note that we
 * mention the register number in a comment in "lib/atomic_asm.S" to help
 * assembly coders from using this register by mistake, so if it
 * is changed here, change that comment as well.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__
/* Called from setup to initialize a hash table to point to per_cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing the atomic lock in do_page_fault_ics(). */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/*
 * Private helper routines in lib/atomic_asm_32.S.
 * Each returns a struct __get_user so the caller can distinguish a
 * successful result from a fault -- presumably as defined in
 * <asm/uaccess.h>; confirm there.
 */
extern struct __get_user __atomic_cmpxchg(volatile int *p,
					  int *lock, int o, int n);
extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
						  int *lock, int o, int n);
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
				      int *lock, u64 o, u64 n);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */
diff --git a/arch/tile/include/asm/auxvec.h b/arch/tile/include/asm/auxvec.h new file mode 100644 index 000000000000..1d393edb0641 --- /dev/null +++ b/arch/tile/include/asm/auxvec.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_AUXVEC_H | ||
16 | #define _ASM_TILE_AUXVEC_H | ||
17 | |||
18 | /* No extensions to auxvec */ | ||
19 | |||
20 | #endif /* _ASM_TILE_AUXVEC_H */ | ||
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h new file mode 100644 index 000000000000..6970bfcad549 --- /dev/null +++ b/arch/tile/include/asm/backtrace.h | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _TILE_BACKTRACE_H | ||
16 | #define _TILE_BACKTRACE_H | ||
17 | |||
18 | |||
19 | |||
20 | #include <linux/types.h> | ||
21 | |||
22 | #include <arch/chip.h> | ||
23 | |||
24 | #if CHIP_VA_WIDTH() > 32 | ||
25 | typedef unsigned long long VirtualAddress; | ||
26 | #else | ||
27 | typedef unsigned int VirtualAddress; | ||
28 | #endif | ||
29 | |||
30 | |||
31 | /** Reads 'size' bytes from 'address' and writes the data to 'result'. | ||
32 | * Returns true if successful, else false (e.g. memory not readable). | ||
33 | */ | ||
34 | typedef bool (*BacktraceMemoryReader)(void *result, | ||
35 | VirtualAddress address, | ||
36 | unsigned int size, | ||
37 | void *extra); | ||
38 | |||
/* State for one step of a stack backtrace; filled in by backtrace_init(). */
typedef struct {
	/** Current PC. */
	VirtualAddress pc;

	/** Current stack pointer value. */
	VirtualAddress sp;

	/** Current frame pointer value (i.e. caller's stack pointer) */
	VirtualAddress fp;

	/** Internal use only: caller's PC for first frame. */
	VirtualAddress initial_frame_caller_pc;

	/** Internal use only: callback to read memory. */
	BacktraceMemoryReader read_memory_func;

	/** Internal use only: arbitrary argument to read_memory_func. */
	void *read_memory_func_extra;

} BacktraceIterator;
59 | |||
60 | |||
61 | /** Initializes a backtracer to start from the given location. | ||
62 | * | ||
63 | * If the frame pointer cannot be determined it is set to -1. | ||
64 | * | ||
65 | * @param state The state to be filled in. | ||
66 | * @param read_memory_func A callback that reads memory. If NULL, a default | ||
67 | * value is provided. | ||
68 | * @param read_memory_func_extra An arbitrary argument to read_memory_func. | ||
69 | * @param pc The current PC. | ||
70 | * @param lr The current value of the 'lr' register. | ||
71 | * @param sp The current value of the 'sp' register. | ||
72 | * @param r52 The current value of the 'r52' register. | ||
73 | */ | ||
74 | extern void backtrace_init(BacktraceIterator *state, | ||
75 | BacktraceMemoryReader read_memory_func, | ||
76 | void *read_memory_func_extra, | ||
77 | VirtualAddress pc, VirtualAddress lr, | ||
78 | VirtualAddress sp, VirtualAddress r52); | ||
79 | |||
80 | |||
81 | /** Advances the backtracing state to the calling frame, returning | ||
82 | * true iff successful. | ||
83 | */ | ||
84 | extern bool backtrace_next(BacktraceIterator *state); | ||
85 | |||
86 | |||
/* How the caller's PC can be recovered from the current frame. */
typedef enum {

	/* We have no idea what the caller's pc is. */
	PC_LOC_UNKNOWN,

	/* The caller's pc is currently in lr. */
	PC_LOC_IN_LR,

	/* The caller's pc can be found by dereferencing the caller's sp. */
	PC_LOC_ON_STACK

} CallerPCLocation;
99 | |||
100 | |||
/* How the caller's SP can be recovered from the current frame. */
typedef enum {

	/* We have no idea what the caller's sp is. */
	SP_LOC_UNKNOWN,

	/* The caller's sp is currently in r52. */
	SP_LOC_IN_R52,

	/* The caller's sp can be found by adding a certain constant
	 * to the current value of sp.
	 */
	SP_LOC_OFFSET

} CallerSPLocation;
115 | |||
116 | |||
/* Bit values ORed into CALLER_* values for info ops. */
enum {
	/* Setting the low bit on any of these values means the info op
	 * applies only to one bundle ago.
	 */
	ONE_BUNDLE_AGO_FLAG = 1,

	/* Setting this bit on a CALLER_SP_* value means the PC is in LR.
	 * If not set, PC is on the stack.
	 */
	PC_IN_LR_FLAG = 2,

	/* This many of the low bits of a CALLER_SP_* value are for the
	 * flag bits above.
	 */
	NUM_INFO_OP_FLAGS = 2,

	/* We cannot have one in the memory pipe so this is the maximum. */
	MAX_INFO_OPS_PER_BUNDLE = 2
};
137 | |||
138 | |||
/** Internal constants used to define 'info' operands.
 * These are base values; the flag bits above are ORed into them.
 */
enum {
	/* 0 and 1 are reserved, as are all negative numbers. */

	CALLER_UNKNOWN_BASE = 2,

	CALLER_SP_IN_R52_BASE = 4,

	CALLER_SP_OFFSET_BASE = 8
};
149 | |||
150 | |||
/** Current backtracer state describing where it thinks the caller is. */
typedef struct {
	/*
	 * Public fields
	 */

	/* How do we find the caller's PC? */
	CallerPCLocation pc_location : 8;

	/* How do we find the caller's SP? */
	CallerSPLocation sp_location : 8;

	/* If sp_location == SP_LOC_OFFSET, then caller_sp == sp +
	 * loc->sp_offset.  Else this field is undefined.
	 */
	uint16_t sp_offset;

	/* Is the most recently visited bundle a terminating bundle? */
	bool at_terminating_bundle;

	/*
	 * Private fields
	 */

	/* Will the forward scanner see someone clobbering sp
	 * (i.e. changing it with something other than addi sp, sp, N?)
	 */
	bool sp_clobber_follows;

	/* Operand to next "visible" info op (no more than one bundle past
	 * the next terminating bundle), or -32768 if none.
	 */
	int16_t next_info_operand;

	/* Is the operand in next_info_operand in the very next bundle? */
	bool is_next_info_operand_adjacent;

} CallerLocation;
189 | |||
190 | |||
191 | |||
192 | |||
193 | #endif /* _TILE_BACKTRACE_H */ | ||
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h new file mode 100644 index 000000000000..84600f3514da --- /dev/null +++ b/arch/tile/include/asm/bitops.h | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * Copyright 1992, Linus Torvalds. | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_BITOPS_H | ||
17 | #define _ASM_TILE_BITOPS_H | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | |||
21 | #ifndef _LINUX_BITOPS_H | ||
22 | #error only <linux/bitops.h> can be included directly | ||
23 | #endif | ||
24 | |||
25 | #ifdef __tilegx__ | ||
26 | #include <asm/bitops_64.h> | ||
27 | #else | ||
28 | #include <asm/bitops_32.h> | ||
29 | #endif | ||
30 | |||
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * The result is undefined when @word is zero, so callers must check
 * against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	/* Count of trailing zeros == index of the lowest set bit. */
	unsigned long bit = __builtin_ctzl(word);

	return bit;
}
41 | |||
/**
 * ffz - find first zero bit in word
 * @word: The word to search
 *
 * The result is undefined when @word is ~0UL, so callers must check
 * against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	/* The lowest clear bit of @word is the lowest set bit of ~@word. */
	return (unsigned long)__builtin_ctzl(~word);
}
52 | |||
/**
 * __fls - find last set bit in word
 * @word: The word to search
 *
 * The result is undefined when @word is zero, so callers must check
 * against 0 first.
 */
static inline unsigned long __fls(unsigned long word)
{
	unsigned long msb = (sizeof(word) * 8) - 1;

	/* Index of the highest set bit, counting from bit 0. */
	return msb - __builtin_clzl(word);
}
63 | |||
/**
 * ffs - find first set bit in word
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the other bitops.
 *
 * ffs(value) returns 0 if value is 0 or the position of the first
 * set bit if value is nonzero.  The first (least significant) bit
 * is at position 1.
 */
static inline int ffs(int x)
{
	/* The builtin already implements the "0 for no bits set" rule. */
	int pos = __builtin_ffs(x);

	return pos;
}
79 | |||
/**
 * fls - find last set bit in word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffs, but returns the position of the most significant set bit.
 *
 * fls(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero.  The last (most significant) bit is
 * at position 32.
 */
static inline int fls(int x)
{
	/*
	 * Bug fix: __builtin_clz(0) is undefined, but the documented
	 * contract (and the generic kernel fls()) requires fls(0) == 0,
	 * so handle zero explicitly before using the builtin.
	 */
	if (x == 0)
		return 0;
	return (sizeof(int) * 8) - __builtin_clz(x);
}
95 | |||
96 | static inline int fls64(__u64 w) | ||
97 | { | ||
98 | return (sizeof(__u64) * 8) - __builtin_clzll(w); | ||
99 | } | ||
100 | |||
/* Return the number of set bits in the 32-bit value @w. */
static inline unsigned int hweight32(unsigned int w)
{
	unsigned int bits = __builtin_popcount(w);

	return bits;
}
105 | |||
/* Return the number of set bits in the low 16 bits of @w. */
static inline unsigned int hweight16(unsigned int w)
{
	unsigned int low = w & 0xffff;

	return __builtin_popcount(low);
}
110 | |||
/* Return the number of set bits in the low 8 bits of @w. */
static inline unsigned int hweight8(unsigned int w)
{
	unsigned int low = w & 0xff;

	return __builtin_popcount(low);
}
115 | |||
116 | static inline unsigned long hweight64(__u64 w) | ||
117 | { | ||
118 | return __builtin_popcountll(w); | ||
119 | } | ||
120 | |||
121 | #include <asm-generic/bitops/lock.h> | ||
122 | #include <asm-generic/bitops/sched.h> | ||
123 | #include <asm-generic/bitops/ext2-non-atomic.h> | ||
124 | #include <asm-generic/bitops/minix.h> | ||
125 | |||
126 | #endif /* _ASM_TILE_BITOPS_H */ | ||
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h new file mode 100644 index 000000000000..7a93c001ac19 --- /dev/null +++ b/arch/tile/include/asm/bitops_32.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_BITOPS_32_H | ||
16 | #define _ASM_TILE_BITOPS_32_H | ||
17 | |||
18 | #include <linux/compiler.h> | ||
19 | #include <asm/atomic.h> | ||
20 | #include <asm/system.h> | ||
21 | |||
22 | /* Tile-specific routines to support <asm/bitops.h>. */ | ||
23 | unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask); | ||
24 | unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask); | ||
25 | unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask); | ||
26 | |||
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Atomic and may not be reordered.  See __set_bit() if the atomic
 * guarantees are not required.  Note that @nr may be almost
 * arbitrarily large; this routine is not restricted to acting on a
 * single-word quantity.
 */
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
	volatile unsigned long *word = addr + BIT_WORD(nr);

	_atomic_or(word, BIT_MASK(nr));
}
41 | |||
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Atomic and may not be reordered.  See __clear_bit() if the atomic
 * guarantees are not required.  Note that @nr may be almost
 * arbitrarily large; this routine is not restricted to acting on a
 * single-word quantity.
 *
 * clear_bit() may not contain a memory barrier, so if it is used for
 * locking purposes, you should call smp_mb__before_clear_bit() and/or
 * smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
 */
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
	volatile unsigned long *word = addr + BIT_WORD(nr);

	_atomic_andn(word, BIT_MASK(nr));
}
60 | |||
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * Atomic and may not be reordered.  See __change_bit() if the atomic
 * guarantees are not required.  Note that @nr may be almost
 * arbitrarily large; this routine is not restricted to acting on a
 * single-word quantity.
 */
static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
	volatile unsigned long *word = addr + BIT_WORD(nr);

	_atomic_xor(word, BIT_MASK(nr));
}
75 | |||
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 *
 * Returns nonzero if the bit was already set, zero otherwise.
 */
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	/* Full barrier before the atomic op gives the "implies a
	 * memory barrier" semantics the interface promises.
	 */
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_or(addr, mask) & mask) != 0;
}
91 | |||
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 *
 * Returns nonzero if the bit was previously set, zero otherwise.
 */
static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	/* Full barrier before the atomic op gives the "implies a
	 * memory barrier" semantics the interface promises.
	 */
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_andn(addr, mask) & mask) != 0;
}
107 | |||
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 *
 * Returns nonzero if the bit was previously set, zero otherwise.
 */
static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	addr += BIT_WORD(nr);
	/* Full barrier before the atomic op gives the "implies a
	 * memory barrier" semantics the interface promises.
	 */
	smp_mb();  /* barrier for proper semantics */
	return (_atomic_xor(addr, mask) & mask) != 0;
}
124 | |||
125 | /* See discussion at smp_mb__before_atomic_dec() in <asm/atomic.h>. */ | ||
126 | #define smp_mb__before_clear_bit() smp_mb() | ||
127 | #define smp_mb__after_clear_bit() do {} while (0) | ||
128 | |||
129 | #include <asm-generic/bitops/non-atomic.h> | ||
130 | #include <asm-generic/bitops/ext2-atomic.h> | ||
131 | |||
132 | #endif /* _ASM_TILE_BITOPS_32_H */ | ||
diff --git a/arch/tile/include/asm/bitsperlong.h b/arch/tile/include/asm/bitsperlong.h new file mode 100644 index 000000000000..58c771f2af2f --- /dev/null +++ b/arch/tile/include/asm/bitsperlong.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_BITSPERLONG_H | ||
16 | #define _ASM_TILE_BITSPERLONG_H | ||
17 | |||
18 | #ifdef __LP64__ | ||
19 | # define __BITS_PER_LONG 64 | ||
20 | #else | ||
21 | # define __BITS_PER_LONG 32 | ||
22 | #endif | ||
23 | |||
24 | #include <asm-generic/bitsperlong.h> | ||
25 | |||
26 | #endif /* _ASM_TILE_BITSPERLONG_H */ | ||
diff --git a/arch/tile/include/asm/bug.h b/arch/tile/include/asm/bug.h new file mode 100644 index 000000000000..b12fd89e42e9 --- /dev/null +++ b/arch/tile/include/asm/bug.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/bug.h> | |||
diff --git a/arch/tile/include/asm/bugs.h b/arch/tile/include/asm/bugs.h new file mode 100644 index 000000000000..61791e1ad9f5 --- /dev/null +++ b/arch/tile/include/asm/bugs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/bugs.h> | |||
diff --git a/arch/tile/include/asm/byteorder.h b/arch/tile/include/asm/byteorder.h new file mode 100644 index 000000000000..9558416d578b --- /dev/null +++ b/arch/tile/include/asm/byteorder.h | |||
@@ -0,0 +1 @@ | |||
#include <linux/byteorder/little_endian.h> | |||
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h new file mode 100644 index 000000000000..f6101840c9e7 --- /dev/null +++ b/arch/tile/include/asm/cache.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CACHE_H | ||
16 | #define _ASM_TILE_CACHE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | /* bytes per L1 data cache line */ | ||
21 | #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE() | ||
22 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | ||
23 | |||
24 | /* bytes per L2 cache line */ | ||
25 | #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE() | ||
26 | #define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT) | ||
27 | #define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES) | ||
28 | |||
29 | /* | ||
30 | * TILE-Gx is fully coherent, so we don't need to define | ||
31 | * ARCH_KMALLOC_MINALIGN. | ||
32 | */ | ||
33 | #ifndef __tilegx__ | ||
34 | #define ARCH_KMALLOC_MINALIGN L2_CACHE_BYTES | ||
35 | #endif | ||
36 | |||
37 | /* use the cache line size for the L2, which is where it counts */ | ||
38 | #define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT | ||
39 | #define SMP_CACHE_BYTES L2_CACHE_BYTES | ||
40 | #define INTERNODE_CACHE_SHIFT L2_CACHE_SHIFT | ||
41 | #define INTERNODE_CACHE_BYTES L2_CACHE_BYTES | ||
42 | |||
43 | /* Group together read-mostly things to avoid cache false sharing */ | ||
44 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | ||
45 | |||
46 | /* | ||
47 | * Attribute for data that is kept read/write coherent until the end of | ||
48 | * initialization, then bumped to read/only incoherent for performance. | ||
49 | */ | ||
50 | #define __write_once __attribute__((__section__(".w1data"))) | ||
51 | |||
52 | #endif /* _ASM_TILE_CACHE_H */ | ||
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h new file mode 100644 index 000000000000..c5741da4eeac --- /dev/null +++ b/arch/tile/include/asm/cacheflush.h | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CACHEFLUSH_H | ||
16 | #define _ASM_TILE_CACHEFLUSH_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | /* Keep includes the same across arches. */ | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/cache.h> | ||
23 | #include <asm/system.h> | ||
24 | #include <arch/icache.h> | ||
25 | |||
26 | /* Caches are physically-indexed and so don't need special treatment */ | ||
27 | #define flush_cache_all() do { } while (0) | ||
28 | #define flush_cache_mm(mm) do { } while (0) | ||
29 | #define flush_cache_dup_mm(mm) do { } while (0) | ||
30 | #define flush_cache_range(vma, start, end) do { } while (0) | ||
31 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | ||
32 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
33 | #define flush_dcache_page(page) do { } while (0) | ||
34 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | ||
35 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | ||
36 | #define flush_cache_vmap(start, end) do { } while (0) | ||
37 | #define flush_cache_vunmap(start, end) do { } while (0) | ||
38 | #define flush_icache_page(vma, pg) do { } while (0) | ||
39 | #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) | ||
40 | |||
41 | /* Flush the icache just on this cpu */ | ||
42 | extern void __flush_icache_range(unsigned long start, unsigned long end); | ||
43 | |||
44 | /* Flush the entire icache on this cpu. */ | ||
45 | #define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE()) | ||
46 | |||
47 | #ifdef CONFIG_SMP | ||
48 | /* | ||
49 | * When the kernel writes to its own text we need to do an SMP | ||
50 | * broadcast to make the L1I coherent everywhere. This includes | ||
51 | * module load and single step. | ||
52 | */ | ||
53 | extern void flush_icache_range(unsigned long start, unsigned long end); | ||
54 | #else | ||
55 | #define flush_icache_range __flush_icache_range | ||
56 | #endif | ||
57 | |||
58 | /* | ||
59 | * An update to an executable user page requires icache flushing. | ||
60 | * We could carefully update only tiles that are running this process, | ||
61 | * and rely on the fact that we flush the icache on every context | ||
62 | * switch to avoid doing extra work here. But for now, I'll be | ||
63 | * conservative and just do a global icache flush. | ||
64 | */ | ||
65 | static inline void copy_to_user_page(struct vm_area_struct *vma, | ||
66 | struct page *page, unsigned long vaddr, | ||
67 | void *dst, void *src, int len) | ||
68 | { | ||
69 | memcpy(dst, src, len); | ||
70 | if (vma->vm_flags & VM_EXEC) { | ||
71 | flush_icache_range((unsigned long) dst, | ||
72 | (unsigned long) dst + len); | ||
73 | } | ||
74 | } | ||
75 | |||
76 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | ||
77 | memcpy((dst), (src), (len)) | ||
78 | |||
79 | /* | ||
80 | * Invalidate a VA range; pads to L2 cacheline boundaries. | ||
81 | * | ||
82 | * Note that on TILE64, __inv_buffer() actually flushes modified | ||
83 | * cache lines in addition to invalidating them, i.e., it's the | ||
84 | * same as __finv_buffer(). | ||
85 | */ | ||
86 | static inline void __inv_buffer(void *buffer, size_t size) | ||
87 | { | ||
88 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
89 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
90 | while (next < finish) { | ||
91 | __insn_inv(next); | ||
92 | next += CHIP_INV_STRIDE(); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | /* Flush a VA range; pads to L2 cacheline boundaries. */ | ||
97 | static inline void __flush_buffer(void *buffer, size_t size) | ||
98 | { | ||
99 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
100 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
101 | while (next < finish) { | ||
102 | __insn_flush(next); | ||
103 | next += CHIP_FLUSH_STRIDE(); | ||
104 | } | ||
105 | } | ||
106 | |||
107 | /* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */ | ||
108 | static inline void __finv_buffer(void *buffer, size_t size) | ||
109 | { | ||
110 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
111 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
112 | while (next < finish) { | ||
113 | __insn_finv(next); | ||
114 | next += CHIP_FINV_STRIDE(); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | |||
/* Invalidate a VA range, then memory fence.
 * The fence ensures the invalidation is visible before returning.
 */
static inline void inv_buffer(void *buffer, size_t size)
{
	__inv_buffer(buffer, size);
	mb_incoherent();
}
125 | |||
/* Flush a VA range, then memory fence.
 * The fence ensures flushed data reaches memory before returning.
 */
static inline void flush_buffer(void *buffer, size_t size)
{
	__flush_buffer(buffer, size);
	mb_incoherent();
}
132 | |||
/* Flush & invalidate a VA range, then memory fence.
 * The fence ensures the flush-and-invalidate completes before returning.
 */
static inline void finv_buffer(void *buffer, size_t size)
{
	__finv_buffer(buffer, size);
	mb_incoherent();
}
139 | |||
140 | #endif /* _ASM_TILE_CACHEFLUSH_H */ | ||
diff --git a/arch/tile/include/asm/checksum.h b/arch/tile/include/asm/checksum.h new file mode 100644 index 000000000000..a120766c7264 --- /dev/null +++ b/arch/tile/include/asm/checksum.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CHECKSUM_H | ||
16 | #define _ASM_TILE_CHECKSUM_H | ||
17 | |||
18 | #include <asm-generic/checksum.h> | ||
19 | |||
20 | /* Allow us to provide a more optimized do_csum(). */ | ||
21 | __wsum do_csum(const unsigned char *buff, int len); | ||
22 | #define do_csum do_csum | ||
23 | |||
24 | #endif /* _ASM_TILE_CHECKSUM_H */ | ||
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h new file mode 100644 index 000000000000..5a34da6cdd79 --- /dev/null +++ b/arch/tile/include/asm/compat.h | |||
@@ -0,0 +1,257 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_COMPAT_H | ||
16 | #define _ASM_TILE_COMPAT_H | ||
17 | |||
18 | /* | ||
19 | * Architecture specific compatibility types | ||
20 | */ | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/sched.h> | ||
23 | |||
24 | #define COMPAT_USER_HZ 100 | ||
25 | |||
26 | /* "long" and pointer-based types are different. */ | ||
27 | typedef s32 compat_long_t; | ||
28 | typedef u32 compat_ulong_t; | ||
29 | typedef u32 compat_size_t; | ||
30 | typedef s32 compat_ssize_t; | ||
31 | typedef s32 compat_off_t; | ||
32 | typedef s32 compat_time_t; | ||
33 | typedef s32 compat_clock_t; | ||
34 | typedef u32 compat_ino_t; | ||
35 | typedef u32 compat_caddr_t; | ||
36 | typedef u32 compat_uptr_t; | ||
37 | |||
38 | /* Many types are "int" or otherwise the same. */ | ||
39 | typedef __kernel_pid_t compat_pid_t; | ||
40 | typedef __kernel_uid_t __compat_uid_t; | ||
41 | typedef __kernel_gid_t __compat_gid_t; | ||
42 | typedef __kernel_uid32_t __compat_uid32_t; | ||
43 | typedef __kernel_uid32_t __compat_gid32_t; | ||
44 | typedef __kernel_mode_t compat_mode_t; | ||
45 | typedef __kernel_dev_t compat_dev_t; | ||
46 | typedef __kernel_loff_t compat_loff_t; | ||
47 | typedef __kernel_nlink_t compat_nlink_t; | ||
48 | typedef __kernel_ipc_pid_t compat_ipc_pid_t; | ||
49 | typedef __kernel_daddr_t compat_daddr_t; | ||
50 | typedef __kernel_fsid_t compat_fsid_t; | ||
51 | typedef __kernel_timer_t compat_timer_t; | ||
52 | typedef __kernel_key_t compat_key_t; | ||
53 | typedef int compat_int_t; | ||
54 | typedef s64 compat_s64; | ||
55 | typedef uint compat_uint_t; | ||
56 | typedef u64 compat_u64; | ||
57 | |||
58 | /* We use the same register dump format in 32-bit images. */ | ||
59 | typedef unsigned long compat_elf_greg_t; | ||
60 | #define COMPAT_ELF_NGREG (sizeof(struct pt_regs) / sizeof(compat_elf_greg_t)) | ||
61 | typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; | ||
62 | |||
63 | struct compat_timespec { | ||
64 | compat_time_t tv_sec; | ||
65 | s32 tv_nsec; | ||
66 | }; | ||
67 | |||
68 | struct compat_timeval { | ||
69 | compat_time_t tv_sec; | ||
70 | s32 tv_usec; | ||
71 | }; | ||
72 | |||
73 | #define compat_stat stat | ||
74 | #define compat_statfs statfs | ||
75 | |||
76 | struct compat_sysctl { | ||
77 | unsigned int name; | ||
78 | int nlen; | ||
79 | unsigned int oldval; | ||
80 | unsigned int oldlenp; | ||
81 | unsigned int newval; | ||
82 | unsigned int newlen; | ||
83 | unsigned int __unused[4]; | ||
84 | }; | ||
85 | |||
86 | |||
87 | struct compat_flock { | ||
88 | short l_type; | ||
89 | short l_whence; | ||
90 | compat_off_t l_start; | ||
91 | compat_off_t l_len; | ||
92 | compat_pid_t l_pid; | ||
93 | }; | ||
94 | |||
95 | #define F_GETLK64 12 /* using 'struct flock64' */ | ||
96 | #define F_SETLK64 13 | ||
97 | #define F_SETLKW64 14 | ||
98 | |||
99 | struct compat_flock64 { | ||
100 | short l_type; | ||
101 | short l_whence; | ||
102 | compat_loff_t l_start; | ||
103 | compat_loff_t l_len; | ||
104 | compat_pid_t l_pid; | ||
105 | }; | ||
106 | |||
107 | #define COMPAT_RLIM_INFINITY 0xffffffff | ||
108 | |||
109 | #define _COMPAT_NSIG 64 | ||
110 | #define _COMPAT_NSIG_BPW 32 | ||
111 | |||
112 | typedef u32 compat_sigset_word; | ||
113 | |||
114 | #define COMPAT_OFF_T_MAX 0x7fffffff | ||
115 | #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL | ||
116 | |||
117 | struct compat_ipc64_perm { | ||
118 | compat_key_t key; | ||
119 | __compat_uid32_t uid; | ||
120 | __compat_gid32_t gid; | ||
121 | __compat_uid32_t cuid; | ||
122 | __compat_gid32_t cgid; | ||
123 | unsigned short mode; | ||
124 | unsigned short __pad1; | ||
125 | unsigned short seq; | ||
126 | unsigned short __pad2; | ||
127 | compat_ulong_t unused1; | ||
128 | compat_ulong_t unused2; | ||
129 | }; | ||
130 | |||
131 | struct compat_semid64_ds { | ||
132 | struct compat_ipc64_perm sem_perm; | ||
133 | compat_time_t sem_otime; | ||
134 | compat_ulong_t __unused1; | ||
135 | compat_time_t sem_ctime; | ||
136 | compat_ulong_t __unused2; | ||
137 | compat_ulong_t sem_nsems; | ||
138 | compat_ulong_t __unused3; | ||
139 | compat_ulong_t __unused4; | ||
140 | }; | ||
141 | |||
142 | struct compat_msqid64_ds { | ||
143 | struct compat_ipc64_perm msg_perm; | ||
144 | compat_time_t msg_stime; | ||
145 | compat_ulong_t __unused1; | ||
146 | compat_time_t msg_rtime; | ||
147 | compat_ulong_t __unused2; | ||
148 | compat_time_t msg_ctime; | ||
149 | compat_ulong_t __unused3; | ||
150 | compat_ulong_t msg_cbytes; | ||
151 | compat_ulong_t msg_qnum; | ||
152 | compat_ulong_t msg_qbytes; | ||
153 | compat_pid_t msg_lspid; | ||
154 | compat_pid_t msg_lrpid; | ||
155 | compat_ulong_t __unused4; | ||
156 | compat_ulong_t __unused5; | ||
157 | }; | ||
158 | |||
159 | struct compat_shmid64_ds { | ||
160 | struct compat_ipc64_perm shm_perm; | ||
161 | compat_size_t shm_segsz; | ||
162 | compat_time_t shm_atime; | ||
163 | compat_ulong_t __unused1; | ||
164 | compat_time_t shm_dtime; | ||
165 | compat_ulong_t __unused2; | ||
166 | compat_time_t shm_ctime; | ||
167 | compat_ulong_t __unused3; | ||
168 | compat_pid_t shm_cpid; | ||
169 | compat_pid_t shm_lpid; | ||
170 | compat_ulong_t shm_nattch; | ||
171 | compat_ulong_t __unused4; | ||
172 | compat_ulong_t __unused5; | ||
173 | }; | ||
174 | |||
175 | /* | ||
176 | * A pointer passed in from user mode. This should not | ||
177 | * be used for syscall parameters, just declare them | ||
178 | * as pointers because the syscall entry code will have | ||
179 | * appropriately converted them already. | ||
180 | */ | ||
181 | |||
182 | static inline void __user *compat_ptr(compat_uptr_t uptr) | ||
183 | { | ||
184 | return (void __user *)(long)(s32)uptr; | ||
185 | } | ||
186 | |||
187 | static inline compat_uptr_t ptr_to_compat(void __user *uptr) | ||
188 | { | ||
189 | return (u32)(unsigned long)uptr; | ||
190 | } | ||
191 | |||
192 | /* Sign-extend when storing a kernel pointer to a user's ptregs. */ | ||
193 | static inline unsigned long ptr_to_compat_reg(void __user *uptr) | ||
194 | { | ||
195 | return (long)(int)(long __force)uptr; | ||
196 | } | ||
197 | |||
198 | static inline void __user *compat_alloc_user_space(long len) | ||
199 | { | ||
200 | struct pt_regs *regs = task_pt_regs(current); | ||
201 | return (void __user *)regs->sp - len; | ||
202 | } | ||
203 | |||
204 | static inline int is_compat_task(void) | ||
205 | { | ||
206 | return current_thread_info()->status & TS_COMPAT; | ||
207 | } | ||
208 | |||
209 | extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka, | ||
210 | siginfo_t *info, sigset_t *set, | ||
211 | struct pt_regs *regs); | ||
212 | |||
213 | /* Compat syscalls. */ | ||
214 | struct compat_sigaction; | ||
215 | struct compat_siginfo; | ||
216 | struct compat_sigaltstack; | ||
217 | long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | ||
218 | compat_uptr_t __user *envp); | ||
219 | long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, | ||
220 | struct compat_sigaction __user *oact, | ||
221 | size_t sigsetsize); | ||
222 | long compat_sys_rt_sigqueueinfo(int pid, int sig, | ||
223 | struct compat_siginfo __user *uinfo); | ||
224 | long compat_sys_rt_sigreturn(void); | ||
225 | long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, | ||
226 | struct compat_sigaltstack __user *uoss_ptr); | ||
227 | long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high); | ||
228 | long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high); | ||
229 | long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count, | ||
230 | u32 dummy, u32 low, u32 high); | ||
231 | long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count, | ||
232 | u32 dummy, u32 low, u32 high); | ||
233 | long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len); | ||
234 | long compat_sys_sync_file_range2(int fd, unsigned int flags, | ||
235 | u32 offset_lo, u32 offset_hi, | ||
236 | u32 nbytes_lo, u32 nbytes_hi); | ||
237 | long compat_sys_fallocate(int fd, int mode, | ||
238 | u32 offset_lo, u32 offset_hi, | ||
239 | u32 len_lo, u32 len_hi); | ||
240 | long compat_sys_sched_rr_get_interval(compat_pid_t pid, | ||
241 | struct compat_timespec __user *interval); | ||
242 | |||
243 | /* Versions of compat functions that differ from generic Linux. */ | ||
244 | struct compat_msgbuf; | ||
245 | long tile_compat_sys_msgsnd(int msqid, | ||
246 | struct compat_msgbuf __user *msgp, | ||
247 | size_t msgsz, int msgflg); | ||
248 | long tile_compat_sys_msgrcv(int msqid, | ||
249 | struct compat_msgbuf __user *msgp, | ||
250 | size_t msgsz, long msgtyp, int msgflg); | ||
251 | long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid, | ||
252 | compat_long_t addr, compat_long_t data); | ||
253 | |||
254 | /* Tilera Linux syscalls that don't have "compat" versions. */ | ||
255 | #define compat_sys_flush_cache sys_flush_cache | ||
256 | |||
257 | #endif /* _ASM_TILE_COMPAT_H */ | ||
diff --git a/arch/tile/include/asm/cputime.h b/arch/tile/include/asm/cputime.h new file mode 100644 index 000000000000..6d68ad7e0ea3 --- /dev/null +++ b/arch/tile/include/asm/cputime.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/cputime.h> | |||
diff --git a/arch/tile/include/asm/current.h b/arch/tile/include/asm/current.h new file mode 100644 index 000000000000..da21acf020d3 --- /dev/null +++ b/arch/tile/include/asm/current.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CURRENT_H | ||
16 | #define _ASM_TILE_CURRENT_H | ||
17 | |||
18 | #include <linux/thread_info.h> | ||
19 | |||
20 | struct task_struct; | ||
21 | |||
22 | static inline struct task_struct *get_current(void) | ||
23 | { | ||
24 | return current_thread_info()->task; | ||
25 | } | ||
26 | #define current get_current() | ||
27 | |||
28 | /* Return a usable "task_struct" pointer even if the real one is corrupt. */ | ||
29 | struct task_struct *validate_current(void); | ||
30 | |||
31 | #endif /* _ASM_TILE_CURRENT_H */ | ||
diff --git a/arch/tile/include/asm/delay.h b/arch/tile/include/asm/delay.h new file mode 100644 index 000000000000..97b0e69e704e --- /dev/null +++ b/arch/tile/include/asm/delay.h | |||
@@ -0,0 +1,34 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DELAY_H | ||
16 | #define _ASM_TILE_DELAY_H | ||
17 | |||
18 | /* Undefined functions to get compile-time errors. */ | ||
19 | extern void __bad_udelay(void); | ||
20 | extern void __bad_ndelay(void); | ||
21 | |||
22 | extern void __udelay(unsigned long usecs); | ||
23 | extern void __ndelay(unsigned long nsecs); | ||
24 | extern void __delay(unsigned long loops); | ||
25 | |||
26 | #define udelay(n) (__builtin_constant_p(n) ? \ | ||
27 | ((n) > 20000 ? __bad_udelay() : __ndelay((n) * 1000)) : \ | ||
28 | __udelay(n)) | ||
29 | |||
30 | #define ndelay(n) (__builtin_constant_p(n) ? \ | ||
31 | ((n) > 20000 ? __bad_ndelay() : __ndelay(n)) : \ | ||
32 | __ndelay(n)) | ||
33 | |||
34 | #endif /* _ASM_TILE_DELAY_H */ | ||
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h new file mode 100644 index 000000000000..f0a4c256403b --- /dev/null +++ b/arch/tile/include/asm/device.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/device.h> | |||
diff --git a/arch/tile/include/asm/div64.h b/arch/tile/include/asm/div64.h new file mode 100644 index 000000000000..6cd978cefb28 --- /dev/null +++ b/arch/tile/include/asm/div64.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/div64.h> | |||
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h new file mode 100644 index 000000000000..cf466b39aa13 --- /dev/null +++ b/arch/tile/include/asm/dma-mapping.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DMA_MAPPING_H | ||
16 | #define _ASM_TILE_DMA_MAPPING_H | ||
17 | |||
18 | #include <linux/mm.h> | ||
19 | #include <linux/scatterlist.h> | ||
20 | #include <linux/cache.h> | ||
21 | #include <linux/io.h> | ||
22 | |||
23 | /* | ||
24 | * Note that on x86 and powerpc, there is a "struct dma_mapping_ops" | ||
25 | * that is used for all the DMA operations. For now, we don't have an | ||
26 | * equivalent on tile, because we only have a single way of doing DMA. | ||
27 | * (Tilera bug 7994 to use dma_mapping_ops.) | ||
28 | */ | ||
29 | |||
30 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
31 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
32 | |||
33 | extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||
34 | enum dma_data_direction); | ||
35 | extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
36 | size_t size, enum dma_data_direction); | ||
37 | extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
38 | enum dma_data_direction); | ||
39 | extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
40 | int nhwentries, enum dma_data_direction); | ||
41 | extern dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
42 | unsigned long offset, size_t size, | ||
43 | enum dma_data_direction); | ||
44 | extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
45 | size_t size, enum dma_data_direction); | ||
46 | extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
47 | int nelems, enum dma_data_direction); | ||
48 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
49 | int nelems, enum dma_data_direction); | ||
50 | |||
51 | |||
52 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
53 | dma_addr_t *dma_handle, gfp_t flag); | ||
54 | |||
55 | void dma_free_coherent(struct device *dev, size_t size, | ||
56 | void *vaddr, dma_addr_t dma_handle); | ||
57 | |||
58 | extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, | ||
59 | enum dma_data_direction); | ||
60 | extern void dma_sync_single_for_device(struct device *, dma_addr_t, | ||
61 | size_t, enum dma_data_direction); | ||
62 | extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, | ||
63 | unsigned long offset, size_t, | ||
64 | enum dma_data_direction); | ||
65 | extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, | ||
66 | unsigned long offset, size_t, | ||
67 | enum dma_data_direction); | ||
68 | extern void dma_cache_sync(void *vaddr, size_t, enum dma_data_direction); | ||
69 | |||
70 | static inline int | ||
71 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
72 | { | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static inline int | ||
77 | dma_supported(struct device *dev, u64 mask) | ||
78 | { | ||
79 | return 1; | ||
80 | } | ||
81 | |||
82 | static inline int | ||
83 | dma_set_mask(struct device *dev, u64 mask) | ||
84 | { | ||
85 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
86 | return -EIO; | ||
87 | |||
88 | *dev->dma_mask = mask; | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static inline int | ||
94 | dma_get_cache_alignment(void) | ||
95 | { | ||
96 | return L2_CACHE_BYTES; | ||
97 | } | ||
98 | |||
99 | #define dma_is_consistent(d, h) (1) | ||
100 | |||
101 | |||
102 | #endif /* _ASM_TILE_DMA_MAPPING_H */ | ||
diff --git a/arch/tile/include/asm/dma.h b/arch/tile/include/asm/dma.h new file mode 100644 index 000000000000..12a7ca16d164 --- /dev/null +++ b/arch/tile/include/asm/dma.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DMA_H | ||
16 | #define _ASM_TILE_DMA_H | ||
17 | |||
18 | #include <asm-generic/dma.h> | ||
19 | |||
20 | /* Needed by drivers/pci/quirks.c */ | ||
21 | #ifdef CONFIG_PCI | ||
22 | extern int isa_dma_bridge_buggy; | ||
23 | #endif | ||
24 | |||
25 | #endif /* _ASM_TILE_DMA_H */ | ||
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h new file mode 100644 index 000000000000..623a6bb741c1 --- /dev/null +++ b/arch/tile/include/asm/elf.h | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_ELF_H | ||
16 | #define _ASM_TILE_ELF_H | ||
17 | |||
18 | /* | ||
19 | * ELF register definitions. | ||
20 | */ | ||
21 | |||
22 | #include <arch/chip.h> | ||
23 | |||
24 | #include <linux/ptrace.h> | ||
25 | #include <asm/byteorder.h> | ||
26 | #include <asm/page.h> | ||
27 | |||
28 | typedef unsigned long elf_greg_t; | ||
29 | |||
30 | #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) | ||
31 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
32 | |||
33 | #define EM_TILE64 187 | ||
34 | #define EM_TILEPRO 188 | ||
35 | #define EM_TILEGX 191 | ||
36 | |||
37 | /* Provide a nominal data structure. */ | ||
38 | #define ELF_NFPREG 0 | ||
39 | typedef double elf_fpreg_t; | ||
40 | typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | ||
41 | |||
42 | #ifdef __tilegx__ | ||
43 | #define ELF_CLASS ELFCLASS64 | ||
44 | #else | ||
45 | #define ELF_CLASS ELFCLASS32 | ||
46 | #endif | ||
47 | #define ELF_DATA ELFDATA2LSB | ||
48 | |||
49 | /* | ||
50 | * There seems to be a bug in how compat_binfmt_elf.c works: it | ||
51 | * #undefs ELF_ARCH, but it is then used in binfmt_elf.c for fill_note_info(). | ||
52 | * Hack around this by providing an enum value of ELF_ARCH. | ||
53 | */ | ||
54 | enum { ELF_ARCH = CHIP_ELF_TYPE() }; | ||
55 | #define ELF_ARCH ELF_ARCH | ||
56 | |||
57 | /* | ||
58 | * This is used to ensure we don't load something for the wrong architecture. | ||
59 | */ | ||
60 | #define elf_check_arch(x) \ | ||
61 | ((x)->e_ident[EI_CLASS] == ELF_CLASS && \ | ||
62 | (x)->e_machine == CHIP_ELF_TYPE()) | ||
63 | |||
64 | /* The module loader only handles a few relocation types. */ | ||
65 | #ifndef __tilegx__ | ||
66 | #define R_TILE_32 1 | ||
67 | #define R_TILE_JOFFLONG_X1 15 | ||
68 | #define R_TILE_IMM16_X0_LO 25 | ||
69 | #define R_TILE_IMM16_X1_LO 26 | ||
70 | #define R_TILE_IMM16_X0_HA 29 | ||
71 | #define R_TILE_IMM16_X1_HA 30 | ||
72 | #else | ||
73 | #define R_TILEGX_64 1 | ||
74 | #define R_TILEGX_JUMPOFF_X1 21 | ||
75 | #define R_TILEGX_IMM16_X0_HW0 36 | ||
76 | #define R_TILEGX_IMM16_X1_HW0 37 | ||
77 | #define R_TILEGX_IMM16_X0_HW1 38 | ||
78 | #define R_TILEGX_IMM16_X1_HW1 39 | ||
79 | #define R_TILEGX_IMM16_X0_HW2_LAST 48 | ||
80 | #define R_TILEGX_IMM16_X1_HW2_LAST 49 | ||
81 | #endif | ||
82 | |||
83 | /* Use standard page size for core dumps. */ | ||
84 | #define ELF_EXEC_PAGESIZE PAGE_SIZE | ||
85 | |||
86 | /* | ||
87 | * This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
88 | * use of this is to invoke "./ld.so someprog" to test out a new version of | ||
89 | * the loader. We need to make sure that it is out of the way of the program | ||
90 | * that it will "exec", and that there is sufficient room for the brk. | ||
91 | */ | ||
92 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | ||
93 | |||
94 | #define ELF_CORE_COPY_REGS(_dest, _regs) \ | ||
95 | memcpy((char *) &_dest, (char *) _regs, \ | ||
96 | sizeof(struct pt_regs)); | ||
97 | |||
98 | /* No additional FP registers to copy. */ | ||
99 | #define ELF_CORE_COPY_FPREGS(t, fpu) 0 | ||
100 | |||
101 | /* | ||
102 | * This yields a mask that user programs can use to figure out what | ||
103 | * instruction set this CPU supports. This could be done in user space, | ||
104 | * but it's not easy, and we've already done it here. | ||
105 | */ | ||
106 | #define ELF_HWCAP (0) | ||
107 | |||
108 | /* | ||
109 | * This yields a string that ld.so will use to load implementation | ||
110 | * specific libraries for optimization. This is more specific in | ||
111 | * intent than poking at uname or /proc/cpuinfo. | ||
112 | */ | ||
113 | #define ELF_PLATFORM (NULL) | ||
114 | |||
115 | extern void elf_plat_init(struct pt_regs *regs, unsigned long load_addr); | ||
116 | |||
117 | #define ELF_PLAT_INIT(_r, load_addr) elf_plat_init(_r, load_addr) | ||
118 | |||
119 | extern int dump_task_regs(struct task_struct *, elf_gregset_t *); | ||
120 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) | ||
121 | |||
122 | /* Tilera Linux has no personalities currently, so no need to do anything. */ | ||
123 | #define SET_PERSONALITY(ex) do { } while (0) | ||
124 | |||
125 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES | ||
126 | /* Support auto-mapping of the user interrupt vectors. */ | ||
127 | struct linux_binprm; | ||
128 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | ||
129 | int executable_stack); | ||
130 | #ifdef CONFIG_COMPAT | ||
131 | |||
132 | #define COMPAT_ELF_PLATFORM "tilegx-m32" | ||
133 | |||
134 | /* | ||
135 | * "Compat" binaries have the same machine type, but 32-bit class, | ||
136 | * since they're not a separate machine type, but just a 32-bit | ||
137 | * variant of the standard 64-bit architecture. | ||
138 | */ | ||
139 | #define compat_elf_check_arch(x) \ | ||
140 | ((x)->e_ident[EI_CLASS] == ELFCLASS32 && \ | ||
141 | (x)->e_machine == CHIP_ELF_TYPE()) | ||
142 | |||
143 | #define compat_start_thread(regs, ip, usp) do { \ | ||
144 | regs->pc = ptr_to_compat_reg((void *)(ip)); \ | ||
145 | regs->sp = ptr_to_compat_reg((void *)(usp)); \ | ||
146 | } while (0) | ||
147 | |||
148 | /* | ||
149 | * Use SET_PERSONALITY to indicate compatibility via TS_COMPAT. | ||
150 | */ | ||
151 | #undef SET_PERSONALITY | ||
152 | #define SET_PERSONALITY(ex) \ | ||
153 | do { \ | ||
154 | current->personality = PER_LINUX; \ | ||
155 | current_thread_info()->status &= ~TS_COMPAT; \ | ||
156 | } while (0) | ||
157 | #define COMPAT_SET_PERSONALITY(ex) \ | ||
158 | do { \ | ||
159 | current->personality = PER_LINUX_32BIT; \ | ||
160 | current_thread_info()->status |= TS_COMPAT; \ | ||
161 | } while (0) | ||
162 | |||
163 | #define COMPAT_ELF_ET_DYN_BASE (0xffffffff / 3 * 2) | ||
164 | |||
165 | #endif /* CONFIG_COMPAT */ | ||
166 | |||
167 | #endif /* _ASM_TILE_ELF_H */ | ||
diff --git a/arch/tile/include/asm/emergency-restart.h b/arch/tile/include/asm/emergency-restart.h new file mode 100644 index 000000000000..3711bd9d50bd --- /dev/null +++ b/arch/tile/include/asm/emergency-restart.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/emergency-restart.h> | |||
diff --git a/arch/tile/include/asm/errno.h b/arch/tile/include/asm/errno.h new file mode 100644 index 000000000000..4c82b503d92f --- /dev/null +++ b/arch/tile/include/asm/errno.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/errno.h> | |||
diff --git a/arch/tile/include/asm/fcntl.h b/arch/tile/include/asm/fcntl.h new file mode 100644 index 000000000000..46ab12db5739 --- /dev/null +++ b/arch/tile/include/asm/fcntl.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/fcntl.h> | |||
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h new file mode 100644 index 000000000000..51537ff9265a --- /dev/null +++ b/arch/tile/include/asm/fixmap.h | |||
@@ -0,0 +1,124 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1998 Ingo Molnar | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_FIXMAP_H | ||
17 | #define _ASM_TILE_FIXMAP_H | ||
18 | |||
19 | #include <asm/page.h> | ||
20 | |||
21 | #ifndef __ASSEMBLY__ | ||
22 | #include <linux/kernel.h> | ||
23 | #ifdef CONFIG_HIGHMEM | ||
24 | #include <linux/threads.h> | ||
25 | #include <asm/kmap_types.h> | ||
26 | #endif | ||
27 | |||
28 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
29 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
30 | |||
31 | /* | ||
32 | * Here we define all the compile-time 'special' virtual | ||
33 | * addresses. The point is to have a constant address at | ||
34 | * compile time, but to set the physical address only | ||
35 | * in the boot process. We allocate these special addresses | ||
36 | * from the end of supervisor virtual memory backwards. | ||
37 | * Also this lets us do fail-safe vmalloc(), we | ||
38 | * can guarantee that these special addresses and | ||
39 | * vmalloc()-ed addresses never overlap. | ||
40 | * | ||
41 | * these 'compile-time allocated' memory buffers are | ||
42 | * fixed-size 4k pages. (or larger if used with an increment | ||
43 | * higher than 1) use fixmap_set(idx,phys) to associate | ||
44 | * physical memory with fixmap indices. | ||
45 | * | ||
46 | * TLB entries of such buffers will not be flushed across | ||
47 | * task switches. | ||
48 | * | ||
49 | * We don't bother with a FIX_HOLE since above the fixmaps | ||
50 | * is unmapped memory in any case. | ||
51 | */ | ||
52 | enum fixed_addresses { | ||
53 | #ifdef CONFIG_HIGHMEM | ||
54 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | ||
55 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | ||
56 | #endif | ||
57 | __end_of_permanent_fixed_addresses, | ||
58 | |||
59 | /* | ||
60 | * Temporary boot-time mappings, used before ioremap() is functional. | ||
61 | * Not currently needed by the Tile architecture. | ||
62 | */ | ||
63 | #define NR_FIX_BTMAPS 0 | ||
64 | #if NR_FIX_BTMAPS | ||
65 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses, | ||
66 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, | ||
67 | __end_of_fixed_addresses | ||
68 | #else | ||
69 | __end_of_fixed_addresses = __end_of_permanent_fixed_addresses | ||
70 | #endif | ||
71 | }; | ||
72 | |||
73 | extern void __set_fixmap(enum fixed_addresses idx, | ||
74 | unsigned long phys, pgprot_t flags); | ||
75 | |||
76 | #define set_fixmap(idx, phys) \ | ||
77 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
78 | /* | ||
79 | * Some hardware wants to get fixmapped without caching. | ||
80 | */ | ||
81 | #define set_fixmap_nocache(idx, phys) \ | ||
82 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | ||
83 | |||
84 | #define clear_fixmap(idx) \ | ||
85 | __set_fixmap(idx, 0, __pgprot(0)) | ||
86 | |||
87 | #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) | ||
88 | #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | ||
89 | #define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE) | ||
90 | #define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE) | ||
91 | |||
92 | extern void __this_fixmap_does_not_exist(void); | ||
93 | |||
94 | /* | ||
95 | * 'index to address' translation. If anyone tries to use the idx | ||
96 | * directly without tranlation, we catch the bug with a NULL-deference | ||
97 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
98 | */ | ||
99 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
100 | { | ||
101 | /* | ||
102 | * this branch gets completely eliminated after inlining, | ||
103 | * except when someone tries to use fixaddr indices in an | ||
104 | * illegal way. (such as mixing up address types or using | ||
105 | * out-of-range indices). | ||
106 | * | ||
107 | * If it doesn't get removed, the linker will complain | ||
108 | * loudly with a reasonably clear error message.. | ||
109 | */ | ||
110 | if (idx >= __end_of_fixed_addresses) | ||
111 | __this_fixmap_does_not_exist(); | ||
112 | |||
113 | return __fix_to_virt(idx); | ||
114 | } | ||
115 | |||
116 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
117 | { | ||
118 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
119 | return __virt_to_fix(vaddr); | ||
120 | } | ||
121 | |||
122 | #endif /* !__ASSEMBLY__ */ | ||
123 | |||
124 | #endif /* _ASM_TILE_FIXMAP_H */ | ||
diff --git a/arch/tile/include/asm/ftrace.h b/arch/tile/include/asm/ftrace.h new file mode 100644 index 000000000000..461459b06d98 --- /dev/null +++ b/arch/tile/include/asm/ftrace.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_FTRACE_H | ||
16 | #define _ASM_TILE_FTRACE_H | ||
17 | |||
18 | /* empty */ | ||
19 | |||
20 | #endif /* _ASM_TILE_FTRACE_H */ | ||
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h new file mode 100644 index 000000000000..fe0d10dcae57 --- /dev/null +++ b/arch/tile/include/asm/futex.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * These routines make two important assumptions: | ||
15 | * | ||
16 | * 1. atomic_t is really an int and can be freely cast back and forth | ||
17 | * (validated in __init_atomic_per_cpu). | ||
18 | * | ||
19 | * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using | ||
20 | * the same locking convention that all the kernel atomic routines use. | ||
21 | */ | ||
22 | |||
23 | #ifndef _ASM_TILE_FUTEX_H | ||
24 | #define _ASM_TILE_FUTEX_H | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | ||
27 | |||
28 | #include <linux/futex.h> | ||
29 | #include <linux/uaccess.h> | ||
30 | #include <linux/errno.h> | ||
31 | |||
/*
 * Out-of-line atomic futex helpers.  Each returns a struct __get_user
 * carrying both an error code (.err) and the previous value of the
 * futex word (.val).
 */
extern struct __get_user futex_set(int __user *v, int i);
extern struct __get_user futex_add(int __user *v, int n);
extern struct __get_user futex_or(int __user *v, int n);
extern struct __get_user futex_andn(int __user *v, int n);
extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);

#ifndef __tilegx__
extern struct __get_user futex_xor(int __user *v, int n);
#else
/*
 * No dedicated xor helper on tilegx: synthesize it by reading the
 * current value and retrying a cmpxchg until it succeeds (or faults).
 */
static inline struct __get_user futex_xor(int __user *uaddr, int n)
{
	struct __get_user asm_ret = __get_user_4(uaddr);
	if (!asm_ret.err) {
		int oldval, newval;
		do {
			oldval = asm_ret.val;
			newval = oldval ^ n;
			/* Retry while another writer changed the word under us. */
			asm_ret = futex_cmpxchg(uaddr, oldval, newval);
		} while (asm_ret.err == 0 && oldval != asm_ret.val);
	}
	return asm_ret;
}
#endif
55 | |||
/*
 * Decode @encoded_op, apply the encoded atomic operation to the futex
 * word at @uaddr, then evaluate the encoded comparison against the
 * word's previous value.
 *
 * Returns the (0/1) comparison result on success, -EFAULT if @uaddr is
 * not writable user memory, or -ENOSYS for an unknown op or cmp code.
 */
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;	/* operation: bits 28..30 */
	int cmp = (encoded_op >> 24) & 15;	/* comparison: bits 24..27 */
	int oparg = (encoded_op << 8) >> 20;	/* sign-extended bits 12..23 */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extended bits 0..11 */
	int ret;
	struct __get_user asm_ret;

	/* FUTEX_OP_OPARG_SHIFT means oparg is a shift count, not a value. */
	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;

	/* The helpers may touch user memory; disable page faults around them. */
	pagefault_disable();
	switch (op) {
	case FUTEX_OP_SET:
		asm_ret = futex_set(uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		asm_ret = futex_add(uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		asm_ret = futex_or(uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		asm_ret = futex_andn(uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		asm_ret = futex_xor(uaddr, oparg);
		break;
	default:
		asm_ret.err = -ENOSYS;
	}
	pagefault_enable();

	ret = asm_ret.err;

	/* On success, compare the word's previous value against cmparg. */
	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ:
			ret = (asm_ret.val == cmparg);
			break;
		case FUTEX_OP_CMP_NE:
			ret = (asm_ret.val != cmparg);
			break;
		case FUTEX_OP_CMP_LT:
			ret = (asm_ret.val < cmparg);
			break;
		case FUTEX_OP_CMP_GE:
			ret = (asm_ret.val >= cmparg);
			break;
		case FUTEX_OP_CMP_LE:
			ret = (asm_ret.val <= cmparg);
			break;
		case FUTEX_OP_CMP_GT:
			ret = (asm_ret.val > cmparg);
			break;
		default:
			ret = -ENOSYS;
		}
	}
	return ret;
}
121 | |||
122 | static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, | ||
123 | int newval) | ||
124 | { | ||
125 | struct __get_user asm_ret; | ||
126 | |||
127 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
128 | return -EFAULT; | ||
129 | |||
130 | asm_ret = futex_cmpxchg(uaddr, oldval, newval); | ||
131 | return asm_ret.err ? asm_ret.err : asm_ret.val; | ||
132 | } | ||
133 | |||
134 | #ifndef __tilegx__ | ||
135 | /* Return failure from the atomic wrappers. */ | ||
136 | struct __get_user __atomic_bad_address(int __user *addr); | ||
137 | #endif | ||
138 | |||
139 | #endif /* !__ASSEMBLY__ */ | ||
140 | |||
141 | #endif /* _ASM_TILE_FUTEX_H */ | ||
diff --git a/arch/tile/include/asm/hardirq.h b/arch/tile/include/asm/hardirq.h new file mode 100644 index 000000000000..822390f9a154 --- /dev/null +++ b/arch/tile/include/asm/hardirq.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HARDIRQ_H | ||
16 | #define _ASM_TILE_HARDIRQ_H | ||
17 | |||
18 | #include <linux/threads.h> | ||
19 | #include <linux/cache.h> | ||
20 | |||
21 | #include <asm/irq.h> | ||
22 | |||
/*
 * Per-cpu interrupt bookkeeping, published via the standard
 * irq_cpustat mechanism (__ARCH_IRQ_STAT / <linux/irq_cpustat.h>).
 * Cacheline-aligned so each cpu's counters live on their own line.
 */
typedef struct {
	unsigned int __softirq_pending;	/* pending-softirq bitmask */
	long idle_timestamp;

	/* Hard interrupt statistics. */
	unsigned int irq_timer_count;
	unsigned int irq_syscall_count;
	unsigned int irq_resched_count;
	unsigned int irq_hv_flush_count;
	unsigned int irq_call_count;
	unsigned int irq_hv_msg_count;
	unsigned int irq_dev_intr_count;

} ____cacheline_aligned irq_cpustat_t;
37 | |||
38 | DECLARE_PER_CPU(irq_cpustat_t, irq_stat); | ||
39 | |||
40 | #define __ARCH_IRQ_STAT | ||
41 | #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) | ||
42 | |||
43 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
44 | |||
45 | #define HARDIRQ_BITS 8 | ||
46 | |||
47 | #endif /* _ASM_TILE_HARDIRQ_H */ | ||
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h new file mode 100644 index 000000000000..0bed3ec7b42c --- /dev/null +++ b/arch/tile/include/asm/hardwall.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Provide methods for the HARDWALL_FILE for accessing the UDN. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_HARDWALL_H | ||
18 | #define _ASM_TILE_HARDWALL_H | ||
19 | |||
20 | #include <linux/ioctl.h> | ||
21 | |||
22 | #define HARDWALL_IOCTL_BASE 0xa2 | ||
23 | |||
24 | /* | ||
25 | * The HARDWALL_CREATE() ioctl is a macro with a "size" argument. | ||
26 | * The resulting ioctl value is passed to the kernel in conjunction | ||
27 | * with a pointer to a little-endian bitmask of cpus, which must be | ||
28 | * physically in a rectangular configuration on the chip. | ||
29 | * The "size" is the number of bytes of cpu mask data. | ||
30 | */ | ||
31 | #define _HARDWALL_CREATE 1 | ||
32 | #define HARDWALL_CREATE(size) \ | ||
33 | _IOC(_IOC_READ, HARDWALL_IOCTL_BASE, _HARDWALL_CREATE, (size)) | ||
34 | |||
35 | #define _HARDWALL_ACTIVATE 2 | ||
36 | #define HARDWALL_ACTIVATE \ | ||
37 | _IO(HARDWALL_IOCTL_BASE, _HARDWALL_ACTIVATE) | ||
38 | |||
39 | #define _HARDWALL_DEACTIVATE 3 | ||
40 | #define HARDWALL_DEACTIVATE \ | ||
41 | _IO(HARDWALL_IOCTL_BASE, _HARDWALL_DEACTIVATE) | ||
42 | |||
43 | #ifndef __KERNEL__ | ||
44 | |||
45 | /* This is the canonical name expected by userspace. */ | ||
46 | #define HARDWALL_FILE "/dev/hardwall" | ||
47 | |||
48 | #else | ||
49 | |||
50 | /* Hook for /proc/tile/hardwall. */ | ||
51 | struct seq_file; | ||
52 | int proc_tile_hardwall_show(struct seq_file *sf, void *v); | ||
53 | |||
54 | #endif | ||
55 | |||
56 | #endif /* _ASM_TILE_HARDWALL_H */ | ||
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h new file mode 100644 index 000000000000..efdd12e91020 --- /dev/null +++ b/arch/tile/include/asm/highmem.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1999 Gerhard Wichert, Siemens AG | ||
3 | * Gerhard.Wichert@pdb.siemens.de | ||
4 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation, version 2. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * Used in CONFIG_HIGHMEM systems for memory pages which | ||
17 | * are not addressable by direct kernel virtual addresses. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #ifndef _ASM_TILE_HIGHMEM_H | ||
22 | #define _ASM_TILE_HIGHMEM_H | ||
23 | |||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/threads.h> | ||
26 | #include <asm/kmap_types.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | #include <asm/homecache.h> | ||
29 | |||
30 | /* declarations for highmem.c */ | ||
31 | extern unsigned long highstart_pfn, highend_pfn; | ||
32 | |||
33 | extern pte_t *pkmap_page_table; | ||
34 | |||
35 | /* | ||
36 | * Ordering is: | ||
37 | * | ||
38 | * FIXADDR_TOP | ||
39 | * fixed_addresses | ||
40 | * FIXADDR_START | ||
41 | * temp fixed addresses | ||
42 | * FIXADDR_BOOT_START | ||
43 | * Persistent kmap area | ||
44 | * PKMAP_BASE | ||
45 | * VMALLOC_END | ||
46 | * Vmalloc area | ||
47 | * VMALLOC_START | ||
48 | * high_memory | ||
49 | */ | ||
50 | #define LAST_PKMAP_MASK (LAST_PKMAP-1) | ||
51 | #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) | ||
52 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) | ||
53 | |||
54 | void *kmap_high(struct page *page); | ||
55 | void kunmap_high(struct page *page); | ||
56 | void *kmap(struct page *page); | ||
57 | void kunmap(struct page *page); | ||
58 | void *kmap_fix_kpte(struct page *page, int finished); | ||
59 | |||
60 | /* This macro is used only in map_new_virtual() to map "page". */ | ||
61 | #define kmap_prot page_to_kpgprot(page) | ||
62 | |||
63 | void kunmap_atomic(void *kvaddr, enum km_type type); | ||
64 | void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); | ||
65 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); | ||
66 | struct page *kmap_atomic_to_page(void *ptr); | ||
67 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); | ||
68 | void *kmap_atomic(struct page *page, enum km_type type); | ||
69 | void kmap_atomic_fix_kpte(struct page *page, int finished); | ||
70 | |||
71 | #define flush_cache_kmaps() do { } while (0) | ||
72 | |||
73 | #endif /* _ASM_TILE_HIGHMEM_H */ | ||
diff --git a/arch/tile/include/asm/homecache.h b/arch/tile/include/asm/homecache.h new file mode 100644 index 000000000000..a8243865d49e --- /dev/null +++ b/arch/tile/include/asm/homecache.h | |||
@@ -0,0 +1,125 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Handle issues around the Tile "home cache" model of coherence. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_HOMECACHE_H | ||
18 | #define _ASM_TILE_HOMECACHE_H | ||
19 | |||
20 | #include <asm/page.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | |||
23 | struct page; | ||
24 | struct task_struct; | ||
25 | struct vm_area_struct; | ||
26 | struct zone; | ||
27 | |||
28 | /* | ||
29 | * Coherence point for the page is its memory controller. | ||
30 | * It is not present in any cache (L1 or L2). | ||
31 | */ | ||
32 | #define PAGE_HOME_UNCACHED -1 | ||
33 | |||
34 | /* | ||
35 | * Is this page immutable (unwritable) and thus able to be cached more | ||
36 | * widely than would otherwise be possible? On tile64 this means we | ||
37 | * mark the PTE to cache locally; on tilepro it means we have "nc" set. | ||
38 | */ | ||
39 | #define PAGE_HOME_IMMUTABLE -2 | ||
40 | |||
41 | /* | ||
42 | * Each cpu considers its own cache to be the home for the page, | ||
43 | * which makes it incoherent. | ||
44 | */ | ||
45 | #define PAGE_HOME_INCOHERENT -3 | ||
46 | |||
47 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
48 | /* Home for the page is distributed via hash-for-home. */ | ||
49 | #define PAGE_HOME_HASH -4 | ||
50 | #endif | ||
51 | |||
52 | /* Homing is unknown or unspecified. Not valid for page_home(). */ | ||
53 | #define PAGE_HOME_UNKNOWN -5 | ||
54 | |||
55 | /* Home on the current cpu. Not valid for page_home(). */ | ||
56 | #define PAGE_HOME_HERE -6 | ||
57 | |||
58 | /* Support wrapper to use instead of explicit hv_flush_remote(). */ | ||
59 | extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length, | ||
60 | const struct cpumask *cache_cpumask, | ||
61 | HV_VirtAddr tlb_va, unsigned long tlb_length, | ||
62 | unsigned long tlb_pgsize, | ||
63 | const struct cpumask *tlb_cpumask, | ||
64 | HV_Remote_ASID *asids, int asidcount); | ||
65 | |||
66 | /* Set homing-related bits in a PTE (can also pass a pgprot_t). */ | ||
67 | extern pte_t pte_set_home(pte_t pte, int home); | ||
68 | |||
69 | /* Do a cache eviction on the specified cpus. */ | ||
70 | extern void homecache_evict(const struct cpumask *mask); | ||
71 | |||
72 | /* | ||
73 | * Change a kernel page's homecache. It must not be mapped in user space. | ||
74 | * If !CONFIG_HOMECACHE, only usable on LOWMEM, and can only be called when | ||
75 | * no other cpu can reference the page, and causes a full-chip cache/TLB flush. | ||
76 | */ | ||
77 | extern void homecache_change_page_home(struct page *, int order, int home); | ||
78 | |||
79 | /* | ||
80 | * Flush a page out of whatever cache(s) it is in. | ||
81 | * This is more than just finv, since it properly handles waiting | ||
82 | * for the data to reach memory on tilepro, but it can be quite | ||
83 | * heavyweight, particularly on hash-for-home memory. | ||
84 | */ | ||
85 | extern void homecache_flush_cache(struct page *, int order); | ||
86 | |||
87 | /* | ||
88 | * Allocate a page with the given GFP flags, home, and optionally | ||
89 | * node. These routines are actually just wrappers around the normal | ||
90 | * alloc_pages() / alloc_pages_node() functions, which set and clear | ||
91 | * a per-cpu variable to communicate with homecache_new_kernel_page(). | ||
92 | * If !CONFIG_HOMECACHE, uses homecache_change_page_home(). | ||
93 | */ | ||
94 | extern struct page *homecache_alloc_pages(gfp_t gfp_mask, | ||
95 | unsigned int order, int home); | ||
96 | extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, | ||
97 | unsigned int order, int home); | ||
98 | #define homecache_alloc_page(gfp_mask, home) \ | ||
99 | homecache_alloc_pages(gfp_mask, 0, home) | ||
100 | |||
101 | /* | ||
102 | * These routines are just pass-throughs to free_pages() when | ||
103 | * we support full homecaching. If !CONFIG_HOMECACHE, then these | ||
104 | * routines use homecache_change_page_home() to reset the home | ||
105 | * back to the default before returning the page to the allocator. | ||
106 | */ | ||
107 | void homecache_free_pages(unsigned long addr, unsigned int order); | ||
108 | #define homecache_free_page(page) \ | ||
109 | homecache_free_pages((page), 0) | ||
110 | |||
111 | |||
112 | |||
113 | /* | ||
114 | * Report the page home for LOWMEM pages by examining their kernel PTE, | ||
115 | * or for highmem pages as the default home. | ||
116 | */ | ||
117 | extern int page_home(struct page *); | ||
118 | |||
119 | #define homecache_migrate_kthread() do {} while (0) | ||
120 | |||
121 | #define homecache_kpte_lock() 0 | ||
122 | #define homecache_kpte_unlock(flags) do {} while (0) | ||
123 | |||
124 | |||
125 | #endif /* _ASM_TILE_HOMECACHE_H */ | ||
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h new file mode 100644 index 000000000000..0521c277bbde --- /dev/null +++ b/arch/tile/include/asm/hugetlb.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HUGETLB_H | ||
16 | #define _ASM_TILE_HUGETLB_H | ||
17 | |||
18 | #include <asm/page.h> | ||
19 | |||
20 | |||
/* The tile architecture reserves no hugepage-only address ranges. */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
26 | |||
27 | /* | ||
28 | * If the arch doesn't supply something else, assume that hugepage | ||
29 | * size aligned regions are ok without further preparation. | ||
30 | */ | ||
31 | static inline int prepare_hugepage_range(struct file *file, | ||
32 | unsigned long addr, unsigned long len) | ||
33 | { | ||
34 | struct hstate *h = hstate_file(file); | ||
35 | if (len & ~huge_page_mask(h)) | ||
36 | return -EINVAL; | ||
37 | if (addr & ~huge_page_mask(h)) | ||
38 | return -EINVAL; | ||
39 | return 0; | ||
40 | } | ||
41 | |||
/* No architecture-specific work is needed before prefaulting hugepages. */
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

/* Free page-table pages for a hugepage region via the generic path. */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

/* Install a huge PTE, telling set_pte_order() the hugepage order. */
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_order(ptep, pte, HUGETLB_PAGE_ORDER);
}

/* Clear a huge PTE and return its previous value (generic behavior). */
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	return ptep_get_and_clear(mm, addr, ptep);
}

/* Clear a huge PTE and flush the TLB entry (generic behavior). */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	ptep_clear_flush(vma, addr, ptep);
}

/* Huge PTEs use the same "none" test as ordinary PTEs. */
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

/* Write-protect a huge PTE value, same as an ordinary PTE. */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

/* Write-protect a huge PTE in place, same as an ordinary PTE. */
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

/* Update access/dirty flags on a huge PTE via the generic helper. */
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

/* Read a huge PTE; a plain dereference suffices on this architecture. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

/* No per-page setup is needed when a hugepage is allocated... */
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

/* ...nor any teardown when it is released. */
static inline void arch_release_hugepage(struct page *page)
{
}
108 | |||
109 | #endif /* _ASM_TILE_HUGETLB_H */ | ||
diff --git a/arch/tile/include/asm/hv_driver.h b/arch/tile/include/asm/hv_driver.h new file mode 100644 index 000000000000..ad614de899b3 --- /dev/null +++ b/arch/tile/include/asm/hv_driver.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This header defines a wrapper interface for managing hypervisor | ||
15 | * device calls that will result in an interrupt at some later time. | ||
16 | * In particular, this provides wrappers for hv_preada() and | ||
17 | * hv_pwritea(). | ||
18 | */ | ||
19 | |||
20 | #ifndef _ASM_TILE_HV_DRIVER_H | ||
21 | #define _ASM_TILE_HV_DRIVER_H | ||
22 | |||
23 | #include <hv/hypervisor.h> | ||
24 | |||
25 | struct hv_driver_cb; | ||
26 | |||
27 | /* A callback to be invoked when an operation completes. */ | ||
28 | typedef void hv_driver_callback_t(struct hv_driver_cb *cb, __hv32 result); | ||
29 | |||
30 | /* | ||
31 | * A structure to hold information about an outstanding call. | ||
32 | * The driver must allocate a separate structure for each call. | ||
33 | */ | ||
34 | struct hv_driver_cb { | ||
35 | hv_driver_callback_t *callback; /* Function to call on interrupt. */ | ||
36 | void *dev; /* Driver-specific state variable. */ | ||
37 | }; | ||
38 | |||
39 | /* Wrapper for invoking hv_dev_preada(). */ | ||
40 | static inline int | ||
41 | tile_hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
42 | HV_SGL sgl[/* sgl_len */], __hv64 offset, | ||
43 | struct hv_driver_cb *callback) | ||
44 | { | ||
45 | return hv_dev_preada(devhdl, flags, sgl_len, sgl, | ||
46 | offset, (HV_IntArg)callback); | ||
47 | } | ||
48 | |||
49 | /* Wrapper for invoking hv_dev_pwritea(). */ | ||
50 | static inline int | ||
51 | tile_hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
52 | HV_SGL sgl[/* sgl_len */], __hv64 offset, | ||
53 | struct hv_driver_cb *callback) | ||
54 | { | ||
55 | return hv_dev_pwritea(devhdl, flags, sgl_len, sgl, | ||
56 | offset, (HV_IntArg)callback); | ||
57 | } | ||
58 | |||
59 | |||
60 | #endif /* _ASM_TILE_HV_DRIVER_H */ | ||
diff --git a/arch/tile/include/asm/hw_irq.h b/arch/tile/include/asm/hw_irq.h new file mode 100644 index 000000000000..4fac5fbf333e --- /dev/null +++ b/arch/tile/include/asm/hw_irq.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HW_IRQ_H | ||
16 | #define _ASM_TILE_HW_IRQ_H | ||
17 | |||
18 | #endif /* _ASM_TILE_HW_IRQ_H */ | ||
diff --git a/arch/tile/include/asm/ide.h b/arch/tile/include/asm/ide.h new file mode 100644 index 000000000000..3c6f2ed894ce --- /dev/null +++ b/arch/tile/include/asm/ide.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IDE_H | ||
16 | #define _ASM_TILE_IDE_H | ||
17 | |||
18 | /* For IDE on PCI */ | ||
19 | #define MAX_HWIFS 10 | ||
20 | |||
21 | #define ide_default_io_ctl(base) (0) | ||
22 | |||
23 | #include <asm-generic/ide_iops.h> | ||
24 | |||
25 | #endif /* _ASM_TILE_IDE_H */ | ||
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h new file mode 100644 index 000000000000..8c95bef3fa45 --- /dev/null +++ b/arch/tile/include/asm/io.h | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IO_H | ||
16 | #define _ASM_TILE_IO_H | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/bug.h> | ||
20 | #include <asm/page.h> | ||
21 | |||
22 | #define IO_SPACE_LIMIT 0xfffffffful | ||
23 | |||
24 | /* | ||
25 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | ||
26 | * access. | ||
27 | */ | ||
28 | #define xlate_dev_mem_ptr(p) __va(p) | ||
29 | |||
30 | /* | ||
31 | * Convert a virtual cached pointer to an uncached pointer. | ||
32 | */ | ||
33 | #define xlate_dev_kmem_ptr(p) p | ||
34 | |||
35 | /* | ||
36 | * Change "struct page" to physical address. | ||
37 | */ | ||
38 | #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) | ||
39 | |||
40 | /* | ||
41 | * Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to | ||
42 | * long before casting it to a pointer to avoid compiler warnings. | ||
43 | */ | ||
44 | #if CHIP_HAS_MMIO() | ||
45 | extern void __iomem *ioremap(resource_size_t offset, unsigned long size); | ||
46 | extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, | ||
47 | pgprot_t pgprot); | ||
48 | extern void iounmap(volatile void __iomem *addr); | ||
49 | #else | ||
50 | #define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr)) | ||
51 | #define iounmap(addr) ((void)0) | ||
52 | #endif | ||
53 | |||
54 | #define ioremap_nocache(physaddr, size) ioremap(physaddr, size) | ||
55 | #define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) | ||
56 | #define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) | ||
57 | |||
58 | void __iomem *ioport_map(unsigned long port, unsigned int len); | ||
59 | extern inline void ioport_unmap(void __iomem *addr) {} | ||
60 | |||
61 | #define mmiowb() | ||
62 | |||
63 | /* Conversion between virtual and physical mappings. */ | ||
64 | #define mm_ptov(addr) ((void *)phys_to_virt(addr)) | ||
65 | #define mm_vtop(addr) ((unsigned long)virt_to_phys(addr)) | ||
66 | |||
67 | #ifdef CONFIG_PCI | ||
68 | |||
69 | extern u8 _tile_readb(unsigned long addr); | ||
70 | extern u16 _tile_readw(unsigned long addr); | ||
71 | extern u32 _tile_readl(unsigned long addr); | ||
72 | extern u64 _tile_readq(unsigned long addr); | ||
73 | extern void _tile_writeb(u8 val, unsigned long addr); | ||
74 | extern void _tile_writew(u16 val, unsigned long addr); | ||
75 | extern void _tile_writel(u32 val, unsigned long addr); | ||
76 | extern void _tile_writeq(u64 val, unsigned long addr); | ||
77 | |||
78 | #else | ||
79 | |||
80 | /* | ||
81 | * The Tile architecture does not support IOMEM unless PCI is enabled. | ||
82 | * Unfortunately we can't yet simply not declare these methods, | ||
83 | * since some generic code that compiles into the kernel, but | ||
84 | * we never run, uses them unconditionally. | ||
85 | */ | ||
86 | |||
87 | static inline int iomem_panic(void) | ||
88 | { | ||
89 | panic("readb/writeb and friends do not exist on tile without PCI"); | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static inline u8 _tile_readb(unsigned long addr) | ||
94 | { | ||
95 | return iomem_panic(); | ||
96 | } | ||
97 | |||
98 | static inline u16 _tile_readw(unsigned long addr) | ||
99 | { | ||
100 | return iomem_panic(); | ||
101 | } | ||
102 | |||
103 | static inline u32 _tile_readl(unsigned long addr) | ||
104 | { | ||
105 | return iomem_panic(); | ||
106 | } | ||
107 | |||
108 | static inline u64 _tile_readq(unsigned long addr) | ||
109 | { | ||
110 | return iomem_panic(); | ||
111 | } | ||
112 | |||
113 | static inline void _tile_writeb(u8 val, unsigned long addr) | ||
114 | { | ||
115 | iomem_panic(); | ||
116 | } | ||
117 | |||
118 | static inline void _tile_writew(u16 val, unsigned long addr) | ||
119 | { | ||
120 | iomem_panic(); | ||
121 | } | ||
122 | |||
123 | static inline void _tile_writel(u32 val, unsigned long addr) | ||
124 | { | ||
125 | iomem_panic(); | ||
126 | } | ||
127 | |||
128 | static inline void _tile_writeq(u64 val, unsigned long addr) | ||
129 | { | ||
130 | iomem_panic(); | ||
131 | } | ||
132 | |||
133 | #endif | ||
134 | |||
135 | #define readb(addr) _tile_readb((unsigned long)addr) | ||
136 | #define readw(addr) _tile_readw((unsigned long)addr) | ||
137 | #define readl(addr) _tile_readl((unsigned long)addr) | ||
138 | #define readq(addr) _tile_readq((unsigned long)addr) | ||
139 | #define writeb(val, addr) _tile_writeb(val, (unsigned long)addr) | ||
140 | #define writew(val, addr) _tile_writew(val, (unsigned long)addr) | ||
141 | #define writel(val, addr) _tile_writel(val, (unsigned long)addr) | ||
142 | #define writeq(val, addr) _tile_writeq(val, (unsigned long)addr) | ||
143 | |||
144 | #define __raw_readb readb | ||
145 | #define __raw_readw readw | ||
146 | #define __raw_readl readl | ||
147 | #define __raw_readq readq | ||
148 | #define __raw_writeb writeb | ||
149 | #define __raw_writew writew | ||
150 | #define __raw_writel writel | ||
151 | #define __raw_writeq writeq | ||
152 | |||
153 | #define readb_relaxed readb | ||
154 | #define readw_relaxed readw | ||
155 | #define readl_relaxed readl | ||
156 | #define readq_relaxed readq | ||
157 | |||
158 | #define ioread8 readb | ||
159 | #define ioread16 readw | ||
160 | #define ioread32 readl | ||
161 | #define ioread64 readq | ||
162 | #define iowrite8 writeb | ||
163 | #define iowrite16 writew | ||
164 | #define iowrite32 writel | ||
165 | #define iowrite64 writeq | ||
166 | |||
167 | static inline void *memcpy_fromio(void *dst, void *src, int len) | ||
168 | { | ||
169 | int x; | ||
170 | BUG_ON((unsigned long)src & 0x3); | ||
171 | for (x = 0; x < len; x += 4) | ||
172 | *(u32 *)(dst + x) = readl(src + x); | ||
173 | return dst; | ||
174 | } | ||
175 | |||
176 | static inline void *memcpy_toio(void *dst, void *src, int len) | ||
177 | { | ||
178 | int x; | ||
179 | BUG_ON((unsigned long)dst & 0x3); | ||
180 | for (x = 0; x < len; x += 4) | ||
181 | writel(*(u32 *)(src + x), dst + x); | ||
182 | return dst; | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | * The Tile architecture does not support IOPORT, even with PCI. | ||
187 | * Unfortunately we can't yet simply not declare these methods, | ||
188 | * since some generic code that compiles into the kernel, but | ||
189 | * we never run, uses them unconditionally. | ||
190 | */ | ||
191 | |||
192 | static inline int ioport_panic(void) | ||
193 | { | ||
194 | panic("inb/outb and friends do not exist on tile"); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static inline u8 inb(unsigned long addr) | ||
199 | { | ||
200 | return ioport_panic(); | ||
201 | } | ||
202 | |||
203 | static inline u16 inw(unsigned long addr) | ||
204 | { | ||
205 | return ioport_panic(); | ||
206 | } | ||
207 | |||
208 | static inline u32 inl(unsigned long addr) | ||
209 | { | ||
210 | return ioport_panic(); | ||
211 | } | ||
212 | |||
213 | static inline void outb(u8 b, unsigned long addr) | ||
214 | { | ||
215 | ioport_panic(); | ||
216 | } | ||
217 | |||
218 | static inline void outw(u16 b, unsigned long addr) | ||
219 | { | ||
220 | ioport_panic(); | ||
221 | } | ||
222 | |||
223 | static inline void outl(u32 b, unsigned long addr) | ||
224 | { | ||
225 | ioport_panic(); | ||
226 | } | ||
227 | |||
228 | #define inb_p(addr) inb(addr) | ||
229 | #define inw_p(addr) inw(addr) | ||
230 | #define inl_p(addr) inl(addr) | ||
231 | #define outb_p(x, addr) outb((x), (addr)) | ||
232 | #define outw_p(x, addr) outw((x), (addr)) | ||
233 | #define outl_p(x, addr) outl((x), (addr)) | ||
234 | |||
235 | static inline void insb(unsigned long addr, void *buffer, int count) | ||
236 | { | ||
237 | ioport_panic(); | ||
238 | } | ||
239 | |||
240 | static inline void insw(unsigned long addr, void *buffer, int count) | ||
241 | { | ||
242 | ioport_panic(); | ||
243 | } | ||
244 | |||
245 | static inline void insl(unsigned long addr, void *buffer, int count) | ||
246 | { | ||
247 | ioport_panic(); | ||
248 | } | ||
249 | |||
250 | static inline void outsb(unsigned long addr, const void *buffer, int count) | ||
251 | { | ||
252 | ioport_panic(); | ||
253 | } | ||
254 | |||
255 | static inline void outsw(unsigned long addr, const void *buffer, int count) | ||
256 | { | ||
257 | ioport_panic(); | ||
258 | } | ||
259 | |||
260 | static inline void outsl(unsigned long addr, const void *buffer, int count) | ||
261 | { | ||
262 | ioport_panic(); | ||
263 | } | ||
264 | |||
265 | #define ioread8_rep(p, dst, count) \ | ||
266 | insb((unsigned long) (p), (dst), (count)) | ||
267 | #define ioread16_rep(p, dst, count) \ | ||
268 | insw((unsigned long) (p), (dst), (count)) | ||
269 | #define ioread32_rep(p, dst, count) \ | ||
270 | insl((unsigned long) (p), (dst), (count)) | ||
271 | |||
272 | #define iowrite8_rep(p, src, count) \ | ||
273 | outsb((unsigned long) (p), (src), (count)) | ||
274 | #define iowrite16_rep(p, src, count) \ | ||
275 | outsw((unsigned long) (p), (src), (count)) | ||
276 | #define iowrite32_rep(p, src, count) \ | ||
277 | outsl((unsigned long) (p), (src), (count)) | ||
278 | |||
279 | #endif /* _ASM_TILE_IO_H */ | ||
diff --git a/arch/tile/include/asm/ioctl.h b/arch/tile/include/asm/ioctl.h new file mode 100644 index 000000000000..b279fe06dfe5 --- /dev/null +++ b/arch/tile/include/asm/ioctl.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ioctl.h> | |||
diff --git a/arch/tile/include/asm/ioctls.h b/arch/tile/include/asm/ioctls.h new file mode 100644 index 000000000000..ec34c760665e --- /dev/null +++ b/arch/tile/include/asm/ioctls.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ioctls.h> | |||
diff --git a/arch/tile/include/asm/ipc.h b/arch/tile/include/asm/ipc.h new file mode 100644 index 000000000000..a46e3d9c2a3f --- /dev/null +++ b/arch/tile/include/asm/ipc.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ipc.h> | |||
diff --git a/arch/tile/include/asm/ipcbuf.h b/arch/tile/include/asm/ipcbuf.h new file mode 100644 index 000000000000..84c7e51cb6d0 --- /dev/null +++ b/arch/tile/include/asm/ipcbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ipcbuf.h> | |||
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h new file mode 100644 index 000000000000..572fd3ef1d73 --- /dev/null +++ b/arch/tile/include/asm/irq.h | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IRQ_H | ||
16 | #define _ASM_TILE_IRQ_H | ||
17 | |||
18 | #include <linux/hardirq.h> | ||
19 | |||
20 | /* The hypervisor interface provides 32 IRQs. */ | ||
21 | #define NR_IRQS 32 | ||
22 | |||
23 | /* IRQ numbers used for linux IPIs. */ | ||
24 | #define IRQ_RESCHEDULE 1 | ||
25 | |||
26 | void ack_bad_irq(unsigned int irq); | ||
27 | |||
28 | /* | ||
29 | * Different ways of handling interrupts. Tile interrupts are always | ||
30 | * per-cpu; there is no global interrupt controller to implement | ||
31 | * enable/disable. Most onboard devices can send their interrupts to | ||
32 | * many tiles at the same time, and Tile-specific drivers know how to | ||
33 | * deal with this. | ||
34 | * | ||
35 | * However, generic devices (usually PCIE based, sometimes GPIO) | ||
36 | * expect that interrupts will fire on a single core at a time and | ||
37 | * that the irq can be enabled or disabled from any core at any time. | ||
38 | * We implement this by directing such interrupts to a single core. | ||
39 | * | ||
40 | * One added wrinkle is that PCI interrupts can be either | ||
41 | * hardware-cleared (legacy interrupts) or software cleared (MSI). | ||
42 | * Other generic device systems (GPIO) are always software-cleared. | ||
43 | * | ||
44 | * The enums below are used by drivers for onboard devices, including | ||
45 | * the internals of PCI root complex and GPIO. They allow the driver | ||
46 | * to tell the generic irq code what kind of interrupt is mapped to a | ||
47 | * particular IRQ number. | ||
48 | */ | ||
49 | enum { | ||
50 | /* per-cpu interrupt; use enable/disable_percpu_irq() to mask */ | ||
51 | TILE_IRQ_PERCPU, | ||
52 | /* global interrupt, hardware responsible for clearing. */ | ||
53 | TILE_IRQ_HW_CLEAR, | ||
54 | /* global interrupt, software responsible for clearing. */ | ||
55 | TILE_IRQ_SW_CLEAR, | ||
56 | }; | ||
57 | |||
58 | |||
59 | /* | ||
60 | * Paravirtualized drivers should call this when they dynamically | ||
61 | * allocate a new IRQ or discover an IRQ that was pre-allocated by the | ||
62 | * hypervisor for use with their particular device. This gives the | ||
63 | * IRQ subsystem an opportunity to do interrupt-type-specific | ||
64 | * initialization. | ||
65 | * | ||
66 | * ISSUE: We should modify this API so that registering anything | ||
67 | * except percpu interrupts also requires providing callback methods | ||
68 | * for enabling and disabling the interrupt. This would allow the | ||
69 | * generic IRQ code to proxy enable/disable_irq() calls back into the | ||
70 | * PCI subsystem, which in turn could enable or disable the interrupt | ||
71 | * at the PCI shim. | ||
72 | */ | ||
73 | void tile_irq_activate(unsigned int irq, int tile_irq_type); | ||
74 | |||
75 | /* | ||
76 | * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know | ||
77 | * how to use enable/disable_percpu_irq() to manage interrupts on each | ||
78 | * core. We can't use the generic enable/disable_irq() because they | ||
79 | * use a single reference count per irq, rather than per cpu per irq. | ||
80 | */ | ||
81 | void enable_percpu_irq(unsigned int irq); | ||
82 | void disable_percpu_irq(unsigned int irq); | ||
83 | |||
84 | |||
85 | void setup_irq_regs(void); | ||
86 | |||
87 | #endif /* _ASM_TILE_IRQ_H */ | ||
diff --git a/arch/tile/include/asm/irq_regs.h b/arch/tile/include/asm/irq_regs.h new file mode 100644 index 000000000000..3dd9c0b70270 --- /dev/null +++ b/arch/tile/include/asm/irq_regs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/irq_regs.h> | |||
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h new file mode 100644 index 000000000000..45cf67c2f286 --- /dev/null +++ b/arch/tile/include/asm/irqflags.h | |||
@@ -0,0 +1,266 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IRQFLAGS_H | ||
16 | #define _ASM_TILE_IRQFLAGS_H | ||
17 | |||
18 | #include <arch/interrupts.h> | ||
19 | #include <arch/chip.h> | ||
20 | |||
21 | /* | ||
22 | * The set of interrupts we want to allow when interrupts are nominally | ||
23 | * disabled. The remainder are effectively "NMI" interrupts from | ||
24 | * the point of view of the generic Linux code. Note that synchronous | ||
25 | * interrupts (aka "non-queued") are not blocked by the mask in any case. | ||
26 | */ | ||
27 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
28 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
29 | (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) | ||
30 | #else | ||
31 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
32 | (~(INT_MASK(INT_PERF_COUNT))) | ||
33 | #endif | ||
34 | |||
35 | #ifndef __ASSEMBLY__ | ||
36 | |||
37 | /* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */ | ||
38 | #include <asm/percpu.h> | ||
39 | #include <arch/spr_def.h> | ||
40 | |||
41 | /* Set and clear kernel interrupt masks. */ | ||
42 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
43 | #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 | ||
44 | # error Fix assumptions about which word various interrupts are in | ||
45 | #endif | ||
46 | #define interrupt_mask_set(n) do { \ | ||
47 | int __n = (n); \ | ||
48 | int __mask = 1 << (__n & 0x1f); \ | ||
49 | if (__n < 32) \ | ||
50 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \ | ||
51 | else \ | ||
52 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \ | ||
53 | } while (0) | ||
54 | #define interrupt_mask_reset(n) do { \ | ||
55 | int __n = (n); \ | ||
56 | int __mask = 1 << (__n & 0x1f); \ | ||
57 | if (__n < 32) \ | ||
58 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \ | ||
59 | else \ | ||
60 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \ | ||
61 | } while (0) | ||
62 | #define interrupt_mask_check(n) ({ \ | ||
63 | int __n = (n); \ | ||
64 | (((__n < 32) ? \ | ||
65 | __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \ | ||
66 | __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \ | ||
67 | >> (__n & 0x1f)) & 1; \ | ||
68 | }) | ||
69 | #define interrupt_mask_set_mask(mask) do { \ | ||
70 | unsigned long long __m = (mask); \ | ||
71 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \ | ||
72 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \ | ||
73 | } while (0) | ||
74 | #define interrupt_mask_reset_mask(mask) do { \ | ||
75 | unsigned long long __m = (mask); \ | ||
76 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \ | ||
77 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \ | ||
78 | } while (0) | ||
79 | #else | ||
80 | #define interrupt_mask_set(n) \ | ||
81 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n))) | ||
82 | #define interrupt_mask_reset(n) \ | ||
83 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n))) | ||
84 | #define interrupt_mask_check(n) \ | ||
85 | ((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1) | ||
86 | #define interrupt_mask_set_mask(mask) \ | ||
87 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask)) | ||
88 | #define interrupt_mask_reset_mask(mask) \ | ||
89 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask)) | ||
90 | #endif | ||
91 | |||
92 | /* | ||
93 | * The set of interrupts we want active if irqs are enabled. | ||
94 | * Note that in particular, the tile timer interrupt comes and goes | ||
95 | * from this set, since we have no other way to turn off the timer. | ||
96 | * Likewise, INTCTRL_1 is removed and re-added during device | ||
97 | * interrupts, as is the hardwall UDN_FIREWALL interrupt. | ||
98 | * We use a low bit (MEM_ERROR) as our sentinel value and make sure it | ||
99 | * is always claimed as an "active interrupt" so we can query that bit | ||
100 | * to know our current state. | ||
101 | */ | ||
102 | DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | ||
103 | #define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) | ||
104 | |||
105 | /* Disable interrupts. */ | ||
106 | #define raw_local_irq_disable() \ | ||
107 | interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS) | ||
108 | |||
109 | /* Disable all interrupts, including NMIs. */ | ||
110 | #define raw_local_irq_disable_all() \ | ||
111 | interrupt_mask_set_mask(-1UL) | ||
112 | |||
113 | /* Re-enable all maskable interrupts. */ | ||
114 | #define raw_local_irq_enable() \ | ||
115 | interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask)) | ||
116 | |||
117 | /* Disable or enable interrupts based on flag argument. */ | ||
118 | #define raw_local_irq_restore(disabled) do { \ | ||
119 | if (disabled) \ | ||
120 | raw_local_irq_disable(); \ | ||
121 | else \ | ||
122 | raw_local_irq_enable(); \ | ||
123 | } while (0) | ||
124 | |||
125 | /* Return true if "flags" argument means interrupts are disabled. */ | ||
126 | #define raw_irqs_disabled_flags(flags) ((flags) != 0) | ||
127 | |||
128 | /* Return true if interrupts are currently disabled. */ | ||
129 | #define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) | ||
130 | |||
131 | /* Save whether interrupts are currently disabled. */ | ||
132 | #define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled()) | ||
133 | |||
134 | /* Save whether interrupts are currently disabled, then disable them. */ | ||
135 | #define raw_local_irq_save(flags) \ | ||
136 | do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0) | ||
137 | |||
138 | /* Prevent the given interrupt from being enabled next time we enable irqs. */ | ||
139 | #define raw_local_irq_mask(interrupt) \ | ||
140 | (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) | ||
141 | |||
142 | /* Prevent the given interrupt from being enabled immediately. */ | ||
143 | #define raw_local_irq_mask_now(interrupt) do { \ | ||
144 | raw_local_irq_mask(interrupt); \ | ||
145 | interrupt_mask_set(interrupt); \ | ||
146 | } while (0) | ||
147 | |||
148 | /* Allow the given interrupt to be enabled next time we enable irqs. */ | ||
149 | #define raw_local_irq_unmask(interrupt) \ | ||
150 | (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) | ||
151 | |||
152 | /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ | ||
153 | #define raw_local_irq_unmask_now(interrupt) do { \ | ||
154 | raw_local_irq_unmask(interrupt); \ | ||
155 | if (!irqs_disabled()) \ | ||
156 | interrupt_mask_reset(interrupt); \ | ||
157 | } while (0) | ||
158 | |||
159 | #else /* __ASSEMBLY__ */ | ||
160 | |||
161 | /* We provide a somewhat more restricted set for assembly. */ | ||
162 | |||
163 | #ifdef __tilegx__ | ||
164 | |||
165 | #if INT_MEM_ERROR != 0 | ||
166 | # error Fix IRQ_DISABLED() macro | ||
167 | #endif | ||
168 | |||
169 | /* Return 0 or 1 to indicate whether interrupts are currently disabled. */ | ||
170 | #define IRQS_DISABLED(tmp) \ | ||
171 | mfspr tmp, INTERRUPT_MASK_1; \ | ||
172 | andi tmp, tmp, 1 | ||
173 | |||
174 | /* Load up a pointer to &interrupts_enabled_mask. */ | ||
175 | #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \ | ||
176 | moveli reg, hw2_last(interrupts_enabled_mask); \ | ||
177 | shl16insli reg, reg, hw1(interrupts_enabled_mask); \ | ||
178 | shl16insli reg, reg, hw0(interrupts_enabled_mask); \ | ||
179 | add reg, reg, tp | ||
180 | |||
181 | /* Disable interrupts. */ | ||
182 | #define IRQ_DISABLE(tmp0, tmp1) \ | ||
183 | moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \ | ||
184 | shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \ | ||
185 | shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \ | ||
186 | mtspr INTERRUPT_MASK_SET_1, tmp0 | ||
187 | |||
188 | /* Disable ALL synchronous interrupts (used by NMI entry). */ | ||
189 | #define IRQ_DISABLE_ALL(tmp) \ | ||
190 | movei tmp, -1; \ | ||
191 | mtspr INTERRUPT_MASK_SET_1, tmp | ||
192 | |||
193 | /* Enable interrupts. */ | ||
194 | #define IRQ_ENABLE(tmp0, tmp1) \ | ||
195 | GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ | ||
196 | ld tmp0, tmp0; \ | ||
197 | mtspr INTERRUPT_MASK_RESET_1, tmp0 | ||
198 | |||
199 | #else /* !__tilegx__ */ | ||
200 | |||
201 | /* | ||
202 | * Return 0 or 1 to indicate whether interrupts are currently disabled. | ||
203 | * Note that it's important that we use a bit from the "low" mask word, | ||
204 | * since when we are enabling, that is the word we write first, so if we | ||
205 | * are interrupted after only writing half of the mask, the interrupt | ||
206 | * handler will correctly observe that we have interrupts enabled, and | ||
207 | * will enable interrupts itself on return from the interrupt handler | ||
208 | * (making the original code's write of the "high" mask word idempotent). | ||
209 | */ | ||
210 | #define IRQS_DISABLED(tmp) \ | ||
211 | mfspr tmp, INTERRUPT_MASK_1_0; \ | ||
212 | shri tmp, tmp, INT_MEM_ERROR; \ | ||
213 | andi tmp, tmp, 1 | ||
214 | |||
215 | /* Load up a pointer to &interrupts_enabled_mask. */ | ||
216 | #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \ | ||
217 | moveli reg, lo16(interrupts_enabled_mask); \ | ||
218 | auli reg, reg, ha16(interrupts_enabled_mask);\ | ||
219 | add reg, reg, tp | ||
220 | |||
221 | /* Disable interrupts. */ | ||
222 | #define IRQ_DISABLE(tmp0, tmp1) \ | ||
223 | { \ | ||
224 | movei tmp0, -1; \ | ||
225 | moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \ | ||
226 | }; \ | ||
227 | { \ | ||
228 | mtspr INTERRUPT_MASK_SET_1_0, tmp0; \ | ||
229 | auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \ | ||
230 | }; \ | ||
231 | mtspr INTERRUPT_MASK_SET_1_1, tmp1 | ||
232 | |||
233 | /* Disable ALL synchronous interrupts (used by NMI entry). */ | ||
234 | #define IRQ_DISABLE_ALL(tmp) \ | ||
235 | movei tmp, -1; \ | ||
236 | mtspr INTERRUPT_MASK_SET_1_0, tmp; \ | ||
237 | mtspr INTERRUPT_MASK_SET_1_1, tmp | ||
238 | |||
239 | /* Enable interrupts. */ | ||
240 | #define IRQ_ENABLE(tmp0, tmp1) \ | ||
241 | GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ | ||
242 | { \ | ||
243 | lw tmp0, tmp0; \ | ||
244 | addi tmp1, tmp0, 4 \ | ||
245 | }; \ | ||
246 | lw tmp1, tmp1; \ | ||
247 | mtspr INTERRUPT_MASK_RESET_1_0, tmp0; \ | ||
248 | mtspr INTERRUPT_MASK_RESET_1_1, tmp1 | ||
249 | #endif | ||
250 | |||
251 | /* | ||
252 | * Do the CPU's IRQ-state tracing from assembly code. We call a | ||
253 | * C function, but almost everywhere we do, we don't mind clobbering | ||
254 | * all the caller-saved registers. | ||
255 | */ | ||
256 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
257 | # define TRACE_IRQS_ON jal trace_hardirqs_on | ||
258 | # define TRACE_IRQS_OFF jal trace_hardirqs_off | ||
259 | #else | ||
260 | # define TRACE_IRQS_ON | ||
261 | # define TRACE_IRQS_OFF | ||
262 | #endif | ||
263 | |||
264 | #endif /* __ASSEMBLY__ */ | ||
265 | |||
266 | #endif /* _ASM_TILE_IRQFLAGS_H */ | ||
diff --git a/arch/tile/include/asm/kdebug.h b/arch/tile/include/asm/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/arch/tile/include/asm/kdebug.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/kdebug.h> | |||
diff --git a/arch/tile/include/asm/kexec.h b/arch/tile/include/asm/kexec.h new file mode 100644 index 000000000000..c11a6cc73bb8 --- /dev/null +++ b/arch/tile/include/asm/kexec.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * based on kexec.h from other architectures in linux-2.6.18 | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_KEXEC_H | ||
18 | #define _ASM_TILE_KEXEC_H | ||
19 | |||
20 | #include <asm/page.h> | ||
21 | |||
22 | /* Maximum physical address we can use pages from. */ | ||
23 | #define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE | ||
24 | /* Maximum address we can reach in physical address mode. */ | ||
25 | #define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE | ||
26 | /* Maximum address we can use for the control code buffer. */ | ||
27 | #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE | ||
28 | |||
29 | #define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE | ||
30 | |||
31 | /* | ||
32 | * We don't bother to provide a unique identifier, since we can only | ||
33 | * reboot with a single type of kernel image anyway. | ||
34 | */ | ||
35 | #define KEXEC_ARCH KEXEC_ARCH_DEFAULT | ||
36 | |||
37 | /* Use the tile override for the page allocator. */ | ||
38 | struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order); | ||
39 | #define kimage_alloc_pages_arch kimage_alloc_pages_arch | ||
40 | |||
41 | #define MAX_NOTE_BYTES 1024 | ||
42 | |||
43 | /* Defined in arch/tile/kernel/relocate_kernel.S */ | ||
44 | extern const unsigned char relocate_new_kernel[]; | ||
45 | extern const unsigned long relocate_new_kernel_size; | ||
46 | extern void relocate_new_kernel_end(void); | ||
47 | |||
48 | /* Provide a dummy definition to avoid build failures. */ | ||
49 | static inline void crash_setup_regs(struct pt_regs *n, struct pt_regs *o) | ||
50 | { | ||
51 | } | ||
52 | |||
53 | #endif /* _ASM_TILE_KEXEC_H */ | ||
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h new file mode 100644 index 000000000000..1480106d1c05 --- /dev/null +++ b/arch/tile/include/asm/kmap_types.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_KMAP_TYPES_H | ||
16 | #define _ASM_TILE_KMAP_TYPES_H | ||
17 | |||
18 | /* | ||
19 | * In TILE Linux each set of four of these uses another 16MB chunk of | ||
20 | * address space, given 64 tiles and 64KB pages, so we only enable | ||
21 | * ones that are required by the kernel configuration. | ||
22 | */ | ||
23 | enum km_type { | ||
24 | KM_BOUNCE_READ, | ||
25 | KM_SKB_SUNRPC_DATA, | ||
26 | KM_SKB_DATA_SOFTIRQ, | ||
27 | KM_USER0, | ||
28 | KM_USER1, | ||
29 | KM_BIO_SRC_IRQ, | ||
30 | KM_IRQ0, | ||
31 | KM_IRQ1, | ||
32 | KM_SOFTIRQ0, | ||
33 | KM_SOFTIRQ1, | ||
34 | KM_MEMCPY0, | ||
35 | KM_MEMCPY1, | ||
36 | #if defined(CONFIG_HIGHPTE) | ||
37 | KM_PTE0, | ||
38 | KM_PTE1, | ||
39 | #endif | ||
40 | KM_TYPE_NR | ||
41 | }; | ||
42 | |||
43 | #endif /* _ASM_TILE_KMAP_TYPES_H */ | ||
diff --git a/arch/tile/include/asm/linkage.h b/arch/tile/include/asm/linkage.h new file mode 100644 index 000000000000..e121c39751a7 --- /dev/null +++ b/arch/tile/include/asm/linkage.h | |||
@@ -0,0 +1,51 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_LINKAGE_H
#define _ASM_TILE_LINKAGE_H

#include <feedback.h>

/* Align function entry points to 8 bytes (one instruction bundle). */
#define __ALIGN .align 8

/*
 * The STD_ENTRY and STD_ENDPROC macros put the function in a
 * self-named .text.foo section, and if linker feedback collection
 * is enabled, add a suitable call to the feedback collection code.
 * STD_ENTRY_SECTION lets you specify a non-standard section name.
 *
 * NOTE: each of the *ENTRY* macros below leaves the pushed section
 * open; it must be paired with STD_ENDPROC, which both defines the
 * .Lend_<name> symbol (used by FEEDBACK_ENTER_EXPLICIT for sizing)
 * and pops the section.
 */

#define STD_ENTRY(name) \
  .pushsection .text.##name, "ax"; \
  ENTRY(name); \
  FEEDBACK_ENTER(name)

#define STD_ENTRY_SECTION(name, section) \
  .pushsection section, "ax"; \
  ENTRY(name); \
  FEEDBACK_ENTER_EXPLICIT(name, section, .Lend_##name - name)

#define STD_ENDPROC(name) \
  ENDPROC(name); \
  .Lend_##name:; \
  .popsection

/* Create a file-static function entry set up for feedback gathering. */
#define STD_ENTRY_LOCAL(name) \
  .pushsection .text.##name, "ax"; \
  ALIGN; \
  name:; \
  FEEDBACK_ENTER(name)

#endif /* _ASM_TILE_LINKAGE_H */
diff --git a/arch/tile/include/asm/local.h b/arch/tile/include/asm/local.h new file mode 100644 index 000000000000..c11c530f74d0 --- /dev/null +++ b/arch/tile/include/asm/local.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/local.h> | |||
diff --git a/arch/tile/include/asm/memprof.h b/arch/tile/include/asm/memprof.h new file mode 100644 index 000000000000..359949be28c1 --- /dev/null +++ b/arch/tile/include/asm/memprof.h | |||
@@ -0,0 +1,33 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * The hypervisor's memory controller profiling infrastructure allows
 * the programmer to find out what fraction of the available memory
 * bandwidth is being consumed at each memory controller.  The
 * profiler provides start, stop, and clear operations to allows
 * profiling over a specific time window, as well as an interface for
 * reading the most recent profile values.
 *
 * This header declares IOCTL codes necessary to control memprof.
 */
#ifndef _ASM_TILE_MEMPROF_H
#define _ASM_TILE_MEMPROF_H

#include <linux/ioctl.h>

/* ioctl "magic" type byte shared by all memprof commands. */
#define MEMPROF_IOCTL_TYPE 0xB4
/* Begin collecting memory-controller bandwidth profile data. */
#define MEMPROF_IOCTL_START _IO(MEMPROF_IOCTL_TYPE, 0)
/* Stop collecting profile data. */
#define MEMPROF_IOCTL_STOP _IO(MEMPROF_IOCTL_TYPE, 1)
/* Reset the accumulated profile values. */
#define MEMPROF_IOCTL_CLEAR _IO(MEMPROF_IOCTL_TYPE, 2)

#endif /* _ASM_TILE_MEMPROF_H */
diff --git a/arch/tile/include/asm/mman.h b/arch/tile/include/asm/mman.h new file mode 100644 index 000000000000..4c6811e3e8dc --- /dev/null +++ b/arch/tile/include/asm/mman.h | |||
@@ -0,0 +1,40 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MMAN_H
#define _ASM_TILE_MMAN_H

#include <asm-generic/mman-common.h>
#include <arch/chip.h>

/* Standard Linux flags */

/* These mmap() flag values are user-space ABI; do not renumber. */
#define MAP_POPULATE	0x0040		/* populate (prefault) pagetables */
#define MAP_NONBLOCK	0x0080		/* do not block on IO */
#define MAP_GROWSDOWN	0x0100		/* stack-like segment */
#define MAP_LOCKED	0x0200		/* pages are locked */
#define MAP_NORESERVE	0x0400		/* don't check for reservations */
#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
#define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
#define MAP_HUGETLB	0x4000		/* create a huge page mapping */


/*
 * Flags for mlockall
 */
#define MCL_CURRENT	1		/* lock all current mappings */
#define MCL_FUTURE	2		/* lock all future mappings */


#endif /* _ASM_TILE_MMAN_H */
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h new file mode 100644 index 000000000000..92f94c77b6e4 --- /dev/null +++ b/arch/tile/include/asm/mmu.h | |||
@@ -0,0 +1,31 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_MMU_H
#define _ASM_TILE_MMU_H

/* Capture any arch- and mm-specific information. */
struct mm_context {
	/*
	 * Written under the mmap_sem semaphore; read without the
	 * semaphore but atomically, but it is conservatively set.
	 *
	 * NOTE(review): presumably the cached cache-priority state
	 * consulted by check_mm_caching() on context switch -- confirm
	 * against the arch/tile mm code.
	 */
	unsigned int priority_cached;
};

typedef struct mm_context mm_context_t;

/* Defined in the arch mm code; semantics not visible in this header. */
void leave_mm(int cpu);

#endif /* _ASM_TILE_MMU_H */
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h new file mode 100644 index 000000000000..9bc0d0725c28 --- /dev/null +++ b/arch/tile/include/asm/mmu_context.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMU_CONTEXT_H | ||
16 | #define _ASM_TILE_MMU_CONTEXT_H | ||
17 | |||
18 | #include <linux/smp.h> | ||
19 | #include <asm/setup.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/pgtable.h> | ||
23 | #include <asm/tlbflush.h> | ||
24 | #include <asm/homecache.h> | ||
25 | #include <asm-generic/mm_hooks.h> | ||
26 | |||
/* Nothing to set up for a fresh mm on tile; always succeeds (returns 0). */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}
32 | |||
/* Note that arch/tile/kernel/head.S also calls hv_install_context() */
/*
 * Point the hypervisor at the page table rooted at 'pgdir' for this
 * cpu, tagged with 'asid'.  There is no way to continue running
 * without a valid context, so any failure is fatal.
 */
static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
{
	/* FIXME: DIRECTIO should not always be set. FIXME. */
	int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO);
	if (rc < 0)
		panic("hv_install_context failed: %d", rc);
}
41 | |||
/*
 * Install 'pgdir', deriving the page-table caching attributes from
 * the PTE that maps the pgdir page itself (via virt_to_pte()).
 */
static inline void install_page_table(pgd_t *pgdir, int asid)
{
	pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir);
	__install_page_table(pgdir, asid, *ptep);
}
47 | |||
48 | /* | ||
49 | * "Lazy" TLB mode is entered when we are switching to a kernel task, | ||
50 | * which borrows the mm of the previous task. The goal of this | ||
51 | * optimization is to avoid having to install a new page table. On | ||
52 | * early x86 machines (where the concept originated) you couldn't do | ||
53 | * anything short of a full page table install for invalidation, so | ||
54 | * handling a remote TLB invalidate required doing a page table | ||
55 | * re-install. Someone clearly decided that it was silly to keep | ||
56 | * doing this while in "lazy" TLB mode, so the optimization involves | ||
57 | * installing the swapper page table instead the first time one | ||
58 | * occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running | ||
59 | * the kernel task doesn't need to take any more interrupts. At that | ||
60 | * point it's then necessary to explicitly reinstall it when context | ||
61 | * switching back to the original mm. | ||
62 | * | ||
63 | * On Tile, we have to do a page-table install whenever DMA is enabled, | ||
64 | * so in that case lazy mode doesn't help anyway. And more generally, | ||
65 | * we have efficient per-page TLB shootdown, and don't expect to spend | ||
66 | * that much time in kernel tasks in general, so just leaving the | ||
67 | * kernel task borrowing the old page table, but handling TLB | ||
68 | * shootdowns, is a reasonable thing to do. And importantly, this | ||
69 | * lets us use the hypervisor's internal APIs for TLB shootdown, which | ||
70 | * means we don't have to worry about having TLB shootdowns blocked | ||
71 | * when Linux is disabling interrupts; see the page migration code for | ||
72 | * an example of where it's important for TLB shootdowns to complete | ||
73 | * even when interrupts are disabled at the Linux level. | ||
74 | */ | ||
/*
 * Entered when switching to a kernel thread that borrows 'mm'.
 * Per the comment above, tile keeps servicing TLB shootdowns while
 * lazy, so the only work needed is the DMA-related case below.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
{
#if CHIP_HAS_TILE_DMA()
	/*
	 * We have to do an "identity" page table switch in order to
	 * clear any pending DMA interrupts.
	 */
	if (current->thread.tile_dma_state.enabled)
		install_page_table(mm->pgd, __get_cpu_var(current_asid));
#endif
}
86 | |||
/*
 * Switch this cpu from 'prev' to 'next'.  A fresh ASID is taken from
 * the per-cpu counter on every switch; when the counter passes
 * max_asid it wraps to min_asid and the local TLB is flushed, so no
 * stale translations from a recycled ASID can survive.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (likely(prev != next)) {

		int cpu = smp_processor_id();

		/* Pick new ASID. */
		int asid = __get_cpu_var(current_asid) + 1;
		if (asid > max_asid) {
			asid = min_asid;
			local_flush_tlb();
		}
		__get_cpu_var(current_asid) = asid;

		/* Clear cpu from the old mm, and set it in the new one. */
		cpumask_clear_cpu(cpu, &prev->cpu_vm_mask);
		cpumask_set_cpu(cpu, &next->cpu_vm_mask);

		/* Re-load page tables */
		install_page_table(next->pgd, asid);

		/* See how we should set the red/black cache info */
		check_mm_caching(prev, next);

		/*
		 * Since we're changing to a new mm, we have to flush
		 * the icache in case some physical page now being mapped
		 * has subsequently been repurposed and has new code.
		 */
		__flush_icache();

	}
}
121 | |||
/* Activating an mm is just a full switch_mm() with no outgoing task. */
static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
	switch_mm(prev_mm, next_mm, NULL);
}

/* No per-mm teardown or lazy-deactivation work is needed on tile. */
#define destroy_context(mm)		do { } while (0)
#define deactivate_mm(tsk, mm)	do { } while (0)

#endif /* _ASM_TILE_MMU_CONTEXT_H */
diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h new file mode 100644 index 000000000000..c6344c4f32ac --- /dev/null +++ b/arch/tile/include/asm/mmzone.h | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMZONE_H | ||
16 | #define _ASM_TILE_MMZONE_H | ||
17 | |||
/* Per-node data, indexed by node id; defined in the arch mm code. */
extern struct pglist_data node_data[];
#define NODE_DATA(nid)	(&node_data[nid])

extern void get_memcfg_numa(void);

#ifdef CONFIG_DISCONTIGMEM

#include <asm/page.h>

/*
 * Generally, memory ranges are always doled out by the hypervisor in
 * fixed-size, power-of-two increments.  That would make computing the node
 * very easy.  We could just take a couple high bits of the PA, which
 * denote the memory shim, and we'd be done.  However, when we're doing
 * memory striping, this may not be true; PAs with different high bit
 * values might be in the same node.  Thus, we keep a lookup table to
 * translate the high bits of the PFN to the node number.
 */
extern int highbits_to_node[];
37 | |||
/*
 * Map a PFN to its NUMA node via the high-bits lookup table.
 * NOTE(review): pfn_valid() below treats a negative result as "no
 * node", so unowned table entries are presumably -1 -- confirm
 * against the table's setup code.
 */
static inline int pfn_to_nid(unsigned long pfn)
{
	return highbits_to_node[__pfn_to_highbits(pfn)];
}
42 | |||
/*
 * Following are macros that each numa implementation must define.
 */

/* First PFN spanned by the node. */
#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
/* One past the last PFN spanned by the node. */
#define node_end_pfn(nid)						\
({									\
	pg_data_t *__pgdat = NODE_DATA(nid);				\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
})

/* A kernel address is valid iff its backing virtual page is. */
#define kern_addr_valid(kaddr)	virt_addr_valid((void *)kaddr)
55 | |||
/*
 * Return nonzero if 'pfn' falls inside some node's spanned range.
 * NOTE(review): the parameter is 'int' while pfn_to_nid() takes
 * 'unsigned long'; harmless while PFNs fit in 31 bits, but worth
 * confirming for large-physical-address configurations.
 */
static inline int pfn_valid(int pfn)
{
	int nid = pfn_to_nid(pfn);

	if (nid >= 0)
		return (pfn < node_end_pfn(nid));
	return 0;
}
64 | |||
/*
 * Information on the NUMA nodes that we compute early.
 * NOTE(review): per-node bookkeeping PFNs filled in during early
 * setup; the exact meaning of each array is established in the arch
 * setup code, not visible from this header.
 */
extern unsigned long node_start_pfn[];
extern unsigned long node_end_pfn[];
extern unsigned long node_memmap_pfn[];
extern unsigned long node_percpu_pfn[];
extern unsigned long node_free_pfn[];
#ifdef CONFIG_HIGHMEM
extern unsigned long node_lowmem_end_pfn[];
#endif
#ifdef CONFIG_PCI
extern unsigned long pci_reserve_start_pfn;
extern unsigned long pci_reserve_end_pfn;
#endif
78 | |||
79 | #endif /* CONFIG_DISCONTIGMEM */ | ||
80 | |||
81 | #endif /* _ASM_TILE_MMZONE_H */ | ||
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h new file mode 100644 index 000000000000..1e4b79fe8584 --- /dev/null +++ b/arch/tile/include/asm/module.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/module.h> | |||
diff --git a/arch/tile/include/asm/msgbuf.h b/arch/tile/include/asm/msgbuf.h new file mode 100644 index 000000000000..809134c644a6 --- /dev/null +++ b/arch/tile/include/asm/msgbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/msgbuf.h> | |||
diff --git a/arch/tile/include/asm/mutex.h b/arch/tile/include/asm/mutex.h new file mode 100644 index 000000000000..ff6101aa2c71 --- /dev/null +++ b/arch/tile/include/asm/mutex.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/mutex-dec.h> | |||
diff --git a/arch/tile/include/asm/opcode-tile.h b/arch/tile/include/asm/opcode-tile.h new file mode 100644 index 000000000000..ba38959137d7 --- /dev/null +++ b/arch/tile/include/asm/opcode-tile.h | |||
@@ -0,0 +1,30 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_OPCODE_TILE_H
#define _ASM_TILE_OPCODE_TILE_H

#include <arch/chip.h>

/* Pull in the opcode table matching the chip's native word size. */
#if CHIP_WORD_SIZE() == 64
#include <asm/opcode-tile_64.h>
#else
#include <asm/opcode-tile_32.h>
#endif

/* These definitions are not correct for TILE64, so just avoid them. */
#undef TILE_ELF_MACHINE_CODE
#undef TILE_ELF_NAME

#endif /* _ASM_TILE_OPCODE_TILE_H */
diff --git a/arch/tile/include/asm/opcode-tile_32.h b/arch/tile/include/asm/opcode-tile_32.h new file mode 100644 index 000000000000..eda60ecbae3d --- /dev/null +++ b/arch/tile/include/asm/opcode-tile_32.h | |||
@@ -0,0 +1,1506 @@ | |||
/* tile.h -- Header file for TILE opcode table
   Copyright (C) 2005 Free Software Foundation, Inc.
   Contributed by Tilera Corp. */

#ifndef opcode_tile_h
#define opcode_tile_h

/* One TILE instruction bundle is a single 64-bit word. */
typedef unsigned long long tile_bundle_bits;


enum
{
  TILE_MAX_OPERANDS = 5 /* mm */
};
15 | |||
16 | typedef enum | ||
17 | { | ||
18 | TILE_OPC_BPT, | ||
19 | TILE_OPC_INFO, | ||
20 | TILE_OPC_INFOL, | ||
21 | TILE_OPC_J, | ||
22 | TILE_OPC_JAL, | ||
23 | TILE_OPC_MOVE, | ||
24 | TILE_OPC_MOVE_SN, | ||
25 | TILE_OPC_MOVEI, | ||
26 | TILE_OPC_MOVEI_SN, | ||
27 | TILE_OPC_MOVELI, | ||
28 | TILE_OPC_MOVELI_SN, | ||
29 | TILE_OPC_MOVELIS, | ||
30 | TILE_OPC_PREFETCH, | ||
31 | TILE_OPC_RAISE, | ||
32 | TILE_OPC_ADD, | ||
33 | TILE_OPC_ADD_SN, | ||
34 | TILE_OPC_ADDB, | ||
35 | TILE_OPC_ADDB_SN, | ||
36 | TILE_OPC_ADDBS_U, | ||
37 | TILE_OPC_ADDBS_U_SN, | ||
38 | TILE_OPC_ADDH, | ||
39 | TILE_OPC_ADDH_SN, | ||
40 | TILE_OPC_ADDHS, | ||
41 | TILE_OPC_ADDHS_SN, | ||
42 | TILE_OPC_ADDI, | ||
43 | TILE_OPC_ADDI_SN, | ||
44 | TILE_OPC_ADDIB, | ||
45 | TILE_OPC_ADDIB_SN, | ||
46 | TILE_OPC_ADDIH, | ||
47 | TILE_OPC_ADDIH_SN, | ||
48 | TILE_OPC_ADDLI, | ||
49 | TILE_OPC_ADDLI_SN, | ||
50 | TILE_OPC_ADDLIS, | ||
51 | TILE_OPC_ADDS, | ||
52 | TILE_OPC_ADDS_SN, | ||
53 | TILE_OPC_ADIFFB_U, | ||
54 | TILE_OPC_ADIFFB_U_SN, | ||
55 | TILE_OPC_ADIFFH, | ||
56 | TILE_OPC_ADIFFH_SN, | ||
57 | TILE_OPC_AND, | ||
58 | TILE_OPC_AND_SN, | ||
59 | TILE_OPC_ANDI, | ||
60 | TILE_OPC_ANDI_SN, | ||
61 | TILE_OPC_AULI, | ||
62 | TILE_OPC_AVGB_U, | ||
63 | TILE_OPC_AVGB_U_SN, | ||
64 | TILE_OPC_AVGH, | ||
65 | TILE_OPC_AVGH_SN, | ||
66 | TILE_OPC_BBNS, | ||
67 | TILE_OPC_BBNS_SN, | ||
68 | TILE_OPC_BBNST, | ||
69 | TILE_OPC_BBNST_SN, | ||
70 | TILE_OPC_BBS, | ||
71 | TILE_OPC_BBS_SN, | ||
72 | TILE_OPC_BBST, | ||
73 | TILE_OPC_BBST_SN, | ||
74 | TILE_OPC_BGEZ, | ||
75 | TILE_OPC_BGEZ_SN, | ||
76 | TILE_OPC_BGEZT, | ||
77 | TILE_OPC_BGEZT_SN, | ||
78 | TILE_OPC_BGZ, | ||
79 | TILE_OPC_BGZ_SN, | ||
80 | TILE_OPC_BGZT, | ||
81 | TILE_OPC_BGZT_SN, | ||
82 | TILE_OPC_BITX, | ||
83 | TILE_OPC_BITX_SN, | ||
84 | TILE_OPC_BLEZ, | ||
85 | TILE_OPC_BLEZ_SN, | ||
86 | TILE_OPC_BLEZT, | ||
87 | TILE_OPC_BLEZT_SN, | ||
88 | TILE_OPC_BLZ, | ||
89 | TILE_OPC_BLZ_SN, | ||
90 | TILE_OPC_BLZT, | ||
91 | TILE_OPC_BLZT_SN, | ||
92 | TILE_OPC_BNZ, | ||
93 | TILE_OPC_BNZ_SN, | ||
94 | TILE_OPC_BNZT, | ||
95 | TILE_OPC_BNZT_SN, | ||
96 | TILE_OPC_BYTEX, | ||
97 | TILE_OPC_BYTEX_SN, | ||
98 | TILE_OPC_BZ, | ||
99 | TILE_OPC_BZ_SN, | ||
100 | TILE_OPC_BZT, | ||
101 | TILE_OPC_BZT_SN, | ||
102 | TILE_OPC_CLZ, | ||
103 | TILE_OPC_CLZ_SN, | ||
104 | TILE_OPC_CRC32_32, | ||
105 | TILE_OPC_CRC32_32_SN, | ||
106 | TILE_OPC_CRC32_8, | ||
107 | TILE_OPC_CRC32_8_SN, | ||
108 | TILE_OPC_CTZ, | ||
109 | TILE_OPC_CTZ_SN, | ||
110 | TILE_OPC_DRAIN, | ||
111 | TILE_OPC_DTLBPR, | ||
112 | TILE_OPC_DWORD_ALIGN, | ||
113 | TILE_OPC_DWORD_ALIGN_SN, | ||
114 | TILE_OPC_FINV, | ||
115 | TILE_OPC_FLUSH, | ||
116 | TILE_OPC_FNOP, | ||
117 | TILE_OPC_ICOH, | ||
118 | TILE_OPC_ILL, | ||
119 | TILE_OPC_INTHB, | ||
120 | TILE_OPC_INTHB_SN, | ||
121 | TILE_OPC_INTHH, | ||
122 | TILE_OPC_INTHH_SN, | ||
123 | TILE_OPC_INTLB, | ||
124 | TILE_OPC_INTLB_SN, | ||
125 | TILE_OPC_INTLH, | ||
126 | TILE_OPC_INTLH_SN, | ||
127 | TILE_OPC_INV, | ||
128 | TILE_OPC_IRET, | ||
129 | TILE_OPC_JALB, | ||
130 | TILE_OPC_JALF, | ||
131 | TILE_OPC_JALR, | ||
132 | TILE_OPC_JALRP, | ||
133 | TILE_OPC_JB, | ||
134 | TILE_OPC_JF, | ||
135 | TILE_OPC_JR, | ||
136 | TILE_OPC_JRP, | ||
137 | TILE_OPC_LB, | ||
138 | TILE_OPC_LB_SN, | ||
139 | TILE_OPC_LB_U, | ||
140 | TILE_OPC_LB_U_SN, | ||
141 | TILE_OPC_LBADD, | ||
142 | TILE_OPC_LBADD_SN, | ||
143 | TILE_OPC_LBADD_U, | ||
144 | TILE_OPC_LBADD_U_SN, | ||
145 | TILE_OPC_LH, | ||
146 | TILE_OPC_LH_SN, | ||
147 | TILE_OPC_LH_U, | ||
148 | TILE_OPC_LH_U_SN, | ||
149 | TILE_OPC_LHADD, | ||
150 | TILE_OPC_LHADD_SN, | ||
151 | TILE_OPC_LHADD_U, | ||
152 | TILE_OPC_LHADD_U_SN, | ||
153 | TILE_OPC_LNK, | ||
154 | TILE_OPC_LNK_SN, | ||
155 | TILE_OPC_LW, | ||
156 | TILE_OPC_LW_SN, | ||
157 | TILE_OPC_LW_NA, | ||
158 | TILE_OPC_LW_NA_SN, | ||
159 | TILE_OPC_LWADD, | ||
160 | TILE_OPC_LWADD_SN, | ||
161 | TILE_OPC_LWADD_NA, | ||
162 | TILE_OPC_LWADD_NA_SN, | ||
163 | TILE_OPC_MAXB_U, | ||
164 | TILE_OPC_MAXB_U_SN, | ||
165 | TILE_OPC_MAXH, | ||
166 | TILE_OPC_MAXH_SN, | ||
167 | TILE_OPC_MAXIB_U, | ||
168 | TILE_OPC_MAXIB_U_SN, | ||
169 | TILE_OPC_MAXIH, | ||
170 | TILE_OPC_MAXIH_SN, | ||
171 | TILE_OPC_MF, | ||
172 | TILE_OPC_MFSPR, | ||
173 | TILE_OPC_MINB_U, | ||
174 | TILE_OPC_MINB_U_SN, | ||
175 | TILE_OPC_MINH, | ||
176 | TILE_OPC_MINH_SN, | ||
177 | TILE_OPC_MINIB_U, | ||
178 | TILE_OPC_MINIB_U_SN, | ||
179 | TILE_OPC_MINIH, | ||
180 | TILE_OPC_MINIH_SN, | ||
181 | TILE_OPC_MM, | ||
182 | TILE_OPC_MNZ, | ||
183 | TILE_OPC_MNZ_SN, | ||
184 | TILE_OPC_MNZB, | ||
185 | TILE_OPC_MNZB_SN, | ||
186 | TILE_OPC_MNZH, | ||
187 | TILE_OPC_MNZH_SN, | ||
188 | TILE_OPC_MTSPR, | ||
189 | TILE_OPC_MULHH_SS, | ||
190 | TILE_OPC_MULHH_SS_SN, | ||
191 | TILE_OPC_MULHH_SU, | ||
192 | TILE_OPC_MULHH_SU_SN, | ||
193 | TILE_OPC_MULHH_UU, | ||
194 | TILE_OPC_MULHH_UU_SN, | ||
195 | TILE_OPC_MULHHA_SS, | ||
196 | TILE_OPC_MULHHA_SS_SN, | ||
197 | TILE_OPC_MULHHA_SU, | ||
198 | TILE_OPC_MULHHA_SU_SN, | ||
199 | TILE_OPC_MULHHA_UU, | ||
200 | TILE_OPC_MULHHA_UU_SN, | ||
201 | TILE_OPC_MULHHSA_UU, | ||
202 | TILE_OPC_MULHHSA_UU_SN, | ||
203 | TILE_OPC_MULHL_SS, | ||
204 | TILE_OPC_MULHL_SS_SN, | ||
205 | TILE_OPC_MULHL_SU, | ||
206 | TILE_OPC_MULHL_SU_SN, | ||
207 | TILE_OPC_MULHL_US, | ||
208 | TILE_OPC_MULHL_US_SN, | ||
209 | TILE_OPC_MULHL_UU, | ||
210 | TILE_OPC_MULHL_UU_SN, | ||
211 | TILE_OPC_MULHLA_SS, | ||
212 | TILE_OPC_MULHLA_SS_SN, | ||
213 | TILE_OPC_MULHLA_SU, | ||
214 | TILE_OPC_MULHLA_SU_SN, | ||
215 | TILE_OPC_MULHLA_US, | ||
216 | TILE_OPC_MULHLA_US_SN, | ||
217 | TILE_OPC_MULHLA_UU, | ||
218 | TILE_OPC_MULHLA_UU_SN, | ||
219 | TILE_OPC_MULHLSA_UU, | ||
220 | TILE_OPC_MULHLSA_UU_SN, | ||
221 | TILE_OPC_MULLL_SS, | ||
222 | TILE_OPC_MULLL_SS_SN, | ||
223 | TILE_OPC_MULLL_SU, | ||
224 | TILE_OPC_MULLL_SU_SN, | ||
225 | TILE_OPC_MULLL_UU, | ||
226 | TILE_OPC_MULLL_UU_SN, | ||
227 | TILE_OPC_MULLLA_SS, | ||
228 | TILE_OPC_MULLLA_SS_SN, | ||
229 | TILE_OPC_MULLLA_SU, | ||
230 | TILE_OPC_MULLLA_SU_SN, | ||
231 | TILE_OPC_MULLLA_UU, | ||
232 | TILE_OPC_MULLLA_UU_SN, | ||
233 | TILE_OPC_MULLLSA_UU, | ||
234 | TILE_OPC_MULLLSA_UU_SN, | ||
235 | TILE_OPC_MVNZ, | ||
236 | TILE_OPC_MVNZ_SN, | ||
237 | TILE_OPC_MVZ, | ||
238 | TILE_OPC_MVZ_SN, | ||
239 | TILE_OPC_MZ, | ||
240 | TILE_OPC_MZ_SN, | ||
241 | TILE_OPC_MZB, | ||
242 | TILE_OPC_MZB_SN, | ||
243 | TILE_OPC_MZH, | ||
244 | TILE_OPC_MZH_SN, | ||
245 | TILE_OPC_NAP, | ||
246 | TILE_OPC_NOP, | ||
247 | TILE_OPC_NOR, | ||
248 | TILE_OPC_NOR_SN, | ||
249 | TILE_OPC_OR, | ||
250 | TILE_OPC_OR_SN, | ||
251 | TILE_OPC_ORI, | ||
252 | TILE_OPC_ORI_SN, | ||
253 | TILE_OPC_PACKBS_U, | ||
254 | TILE_OPC_PACKBS_U_SN, | ||
255 | TILE_OPC_PACKHB, | ||
256 | TILE_OPC_PACKHB_SN, | ||
257 | TILE_OPC_PACKHS, | ||
258 | TILE_OPC_PACKHS_SN, | ||
259 | TILE_OPC_PACKLB, | ||
260 | TILE_OPC_PACKLB_SN, | ||
261 | TILE_OPC_PCNT, | ||
262 | TILE_OPC_PCNT_SN, | ||
263 | TILE_OPC_RL, | ||
264 | TILE_OPC_RL_SN, | ||
265 | TILE_OPC_RLI, | ||
266 | TILE_OPC_RLI_SN, | ||
267 | TILE_OPC_S1A, | ||
268 | TILE_OPC_S1A_SN, | ||
269 | TILE_OPC_S2A, | ||
270 | TILE_OPC_S2A_SN, | ||
271 | TILE_OPC_S3A, | ||
272 | TILE_OPC_S3A_SN, | ||
273 | TILE_OPC_SADAB_U, | ||
274 | TILE_OPC_SADAB_U_SN, | ||
275 | TILE_OPC_SADAH, | ||
276 | TILE_OPC_SADAH_SN, | ||
277 | TILE_OPC_SADAH_U, | ||
278 | TILE_OPC_SADAH_U_SN, | ||
279 | TILE_OPC_SADB_U, | ||
280 | TILE_OPC_SADB_U_SN, | ||
281 | TILE_OPC_SADH, | ||
282 | TILE_OPC_SADH_SN, | ||
283 | TILE_OPC_SADH_U, | ||
284 | TILE_OPC_SADH_U_SN, | ||
285 | TILE_OPC_SB, | ||
286 | TILE_OPC_SBADD, | ||
287 | TILE_OPC_SEQ, | ||
288 | TILE_OPC_SEQ_SN, | ||
289 | TILE_OPC_SEQB, | ||
290 | TILE_OPC_SEQB_SN, | ||
291 | TILE_OPC_SEQH, | ||
292 | TILE_OPC_SEQH_SN, | ||
293 | TILE_OPC_SEQI, | ||
294 | TILE_OPC_SEQI_SN, | ||
295 | TILE_OPC_SEQIB, | ||
296 | TILE_OPC_SEQIB_SN, | ||
297 | TILE_OPC_SEQIH, | ||
298 | TILE_OPC_SEQIH_SN, | ||
299 | TILE_OPC_SH, | ||
300 | TILE_OPC_SHADD, | ||
301 | TILE_OPC_SHL, | ||
302 | TILE_OPC_SHL_SN, | ||
303 | TILE_OPC_SHLB, | ||
304 | TILE_OPC_SHLB_SN, | ||
305 | TILE_OPC_SHLH, | ||
306 | TILE_OPC_SHLH_SN, | ||
307 | TILE_OPC_SHLI, | ||
308 | TILE_OPC_SHLI_SN, | ||
309 | TILE_OPC_SHLIB, | ||
310 | TILE_OPC_SHLIB_SN, | ||
311 | TILE_OPC_SHLIH, | ||
312 | TILE_OPC_SHLIH_SN, | ||
313 | TILE_OPC_SHR, | ||
314 | TILE_OPC_SHR_SN, | ||
315 | TILE_OPC_SHRB, | ||
316 | TILE_OPC_SHRB_SN, | ||
317 | TILE_OPC_SHRH, | ||
318 | TILE_OPC_SHRH_SN, | ||
319 | TILE_OPC_SHRI, | ||
320 | TILE_OPC_SHRI_SN, | ||
321 | TILE_OPC_SHRIB, | ||
322 | TILE_OPC_SHRIB_SN, | ||
323 | TILE_OPC_SHRIH, | ||
324 | TILE_OPC_SHRIH_SN, | ||
325 | TILE_OPC_SLT, | ||
326 | TILE_OPC_SLT_SN, | ||
327 | TILE_OPC_SLT_U, | ||
328 | TILE_OPC_SLT_U_SN, | ||
329 | TILE_OPC_SLTB, | ||
330 | TILE_OPC_SLTB_SN, | ||
331 | TILE_OPC_SLTB_U, | ||
332 | TILE_OPC_SLTB_U_SN, | ||
333 | TILE_OPC_SLTE, | ||
334 | TILE_OPC_SLTE_SN, | ||
335 | TILE_OPC_SLTE_U, | ||
336 | TILE_OPC_SLTE_U_SN, | ||
337 | TILE_OPC_SLTEB, | ||
338 | TILE_OPC_SLTEB_SN, | ||
339 | TILE_OPC_SLTEB_U, | ||
340 | TILE_OPC_SLTEB_U_SN, | ||
341 | TILE_OPC_SLTEH, | ||
342 | TILE_OPC_SLTEH_SN, | ||
343 | TILE_OPC_SLTEH_U, | ||
344 | TILE_OPC_SLTEH_U_SN, | ||
345 | TILE_OPC_SLTH, | ||
346 | TILE_OPC_SLTH_SN, | ||
347 | TILE_OPC_SLTH_U, | ||
348 | TILE_OPC_SLTH_U_SN, | ||
349 | TILE_OPC_SLTI, | ||
350 | TILE_OPC_SLTI_SN, | ||
351 | TILE_OPC_SLTI_U, | ||
352 | TILE_OPC_SLTI_U_SN, | ||
353 | TILE_OPC_SLTIB, | ||
354 | TILE_OPC_SLTIB_SN, | ||
355 | TILE_OPC_SLTIB_U, | ||
356 | TILE_OPC_SLTIB_U_SN, | ||
357 | TILE_OPC_SLTIH, | ||
358 | TILE_OPC_SLTIH_SN, | ||
359 | TILE_OPC_SLTIH_U, | ||
360 | TILE_OPC_SLTIH_U_SN, | ||
361 | TILE_OPC_SNE, | ||
362 | TILE_OPC_SNE_SN, | ||
363 | TILE_OPC_SNEB, | ||
364 | TILE_OPC_SNEB_SN, | ||
365 | TILE_OPC_SNEH, | ||
366 | TILE_OPC_SNEH_SN, | ||
367 | TILE_OPC_SRA, | ||
368 | TILE_OPC_SRA_SN, | ||
369 | TILE_OPC_SRAB, | ||
370 | TILE_OPC_SRAB_SN, | ||
371 | TILE_OPC_SRAH, | ||
372 | TILE_OPC_SRAH_SN, | ||
373 | TILE_OPC_SRAI, | ||
374 | TILE_OPC_SRAI_SN, | ||
375 | TILE_OPC_SRAIB, | ||
376 | TILE_OPC_SRAIB_SN, | ||
377 | TILE_OPC_SRAIH, | ||
378 | TILE_OPC_SRAIH_SN, | ||
379 | TILE_OPC_SUB, | ||
380 | TILE_OPC_SUB_SN, | ||
381 | TILE_OPC_SUBB, | ||
382 | TILE_OPC_SUBB_SN, | ||
383 | TILE_OPC_SUBBS_U, | ||
384 | TILE_OPC_SUBBS_U_SN, | ||
385 | TILE_OPC_SUBH, | ||
386 | TILE_OPC_SUBH_SN, | ||
387 | TILE_OPC_SUBHS, | ||
388 | TILE_OPC_SUBHS_SN, | ||
389 | TILE_OPC_SUBS, | ||
390 | TILE_OPC_SUBS_SN, | ||
391 | TILE_OPC_SW, | ||
392 | TILE_OPC_SWADD, | ||
393 | TILE_OPC_SWINT0, | ||
394 | TILE_OPC_SWINT1, | ||
395 | TILE_OPC_SWINT2, | ||
396 | TILE_OPC_SWINT3, | ||
397 | TILE_OPC_TBLIDXB0, | ||
398 | TILE_OPC_TBLIDXB0_SN, | ||
399 | TILE_OPC_TBLIDXB1, | ||
400 | TILE_OPC_TBLIDXB1_SN, | ||
401 | TILE_OPC_TBLIDXB2, | ||
402 | TILE_OPC_TBLIDXB2_SN, | ||
403 | TILE_OPC_TBLIDXB3, | ||
404 | TILE_OPC_TBLIDXB3_SN, | ||
405 | TILE_OPC_TNS, | ||
406 | TILE_OPC_TNS_SN, | ||
407 | TILE_OPC_WH64, | ||
408 | TILE_OPC_XOR, | ||
409 | TILE_OPC_XOR_SN, | ||
410 | TILE_OPC_XORI, | ||
411 | TILE_OPC_XORI_SN, | ||
412 | TILE_OPC_NONE | ||
413 | } tile_mnemonic; | ||
414 | |||
/* 64-bit pattern for a { bpt ; nop } bundle. */
#define TILE_BPT_BUNDLE 0x400b3cae70166000ULL


/* ELF e_machine value and BFD target name for this ISA.
   NOTE: <asm/opcode-tile.h> #undefs both after including this file,
   since they are not correct for TILE64. */
#define TILE_ELF_MACHINE_CODE EM_TILEPRO

#define TILE_ELF_NAME "elf32-tilepro"
423 | |||
/* Static-network branch offset: bundle bits [9:0]. */
static __inline unsigned int
get_BrOff_SN(tile_bundle_bits num)
{
  const unsigned int n = (unsigned int)num;
  return (((n >> 0)) & 0x3ff);
}
430 | |||
431 | static __inline unsigned int | ||
432 | get_BrOff_X1(tile_bundle_bits n) | ||
433 | { | ||
434 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
435 | (((unsigned int)(n >> 20)) & 0x00018000); | ||
436 | } | ||
437 | |||
/* X1 branch-type field: bundle bits [34:31]. */
static __inline unsigned int
get_BrType_X1(tile_bundle_bits n)
{
  return (((unsigned int)(n >> 31)) & 0xf);
}
443 | |||
444 | static __inline unsigned int | ||
445 | get_Dest_Imm8_X1(tile_bundle_bits n) | ||
446 | { | ||
447 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
448 | (((unsigned int)(n >> 43)) & 0x000000c0); | ||
449 | } | ||
450 | |||
/* Static-network destination field: bundle bits [3:2]. */
static __inline unsigned int
get_Dest_SN(tile_bundle_bits num)
{
  const unsigned int n = (unsigned int)num;
  return (((n >> 2)) & 0x3);
}
457 | |||
458 | static __inline unsigned int | ||
459 | get_Dest_X0(tile_bundle_bits num) | ||
460 | { | ||
461 | const unsigned int n = (unsigned int)num; | ||
462 | return (((n >> 0)) & 0x3f); | ||
463 | } | ||
464 | |||
/* X1 destination register: bundle bits [36:31]. */
static __inline unsigned int
get_Dest_X1(tile_bundle_bits n)
{
  return (((unsigned int)(n >> 31)) & 0x3f);
}
470 | |||
471 | static __inline unsigned int | ||
472 | get_Dest_Y0(tile_bundle_bits num) | ||
473 | { | ||
474 | const unsigned int n = (unsigned int)num; | ||
475 | return (((n >> 0)) & 0x3f); | ||
476 | } | ||
477 | |||
478 | static __inline unsigned int | ||
479 | get_Dest_Y1(tile_bundle_bits n) | ||
480 | { | ||
481 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
482 | } | ||
483 | |||
484 | static __inline unsigned int | ||
485 | get_Imm16_X0(tile_bundle_bits num) | ||
486 | { | ||
487 | const unsigned int n = (unsigned int)num; | ||
488 | return (((n >> 12)) & 0xffff); | ||
489 | } | ||
490 | |||
491 | static __inline unsigned int | ||
492 | get_Imm16_X1(tile_bundle_bits n) | ||
493 | { | ||
494 | return (((unsigned int)(n >> 43)) & 0xffff); | ||
495 | } | ||
496 | |||
497 | static __inline unsigned int | ||
498 | get_Imm8_SN(tile_bundle_bits num) | ||
499 | { | ||
500 | const unsigned int n = (unsigned int)num; | ||
501 | return (((n >> 0)) & 0xff); | ||
502 | } | ||
503 | |||
504 | static __inline unsigned int | ||
505 | get_Imm8_X0(tile_bundle_bits num) | ||
506 | { | ||
507 | const unsigned int n = (unsigned int)num; | ||
508 | return (((n >> 12)) & 0xff); | ||
509 | } | ||
510 | |||
511 | static __inline unsigned int | ||
512 | get_Imm8_X1(tile_bundle_bits n) | ||
513 | { | ||
514 | return (((unsigned int)(n >> 43)) & 0xff); | ||
515 | } | ||
516 | |||
517 | static __inline unsigned int | ||
518 | get_Imm8_Y0(tile_bundle_bits num) | ||
519 | { | ||
520 | const unsigned int n = (unsigned int)num; | ||
521 | return (((n >> 12)) & 0xff); | ||
522 | } | ||
523 | |||
524 | static __inline unsigned int | ||
525 | get_Imm8_Y1(tile_bundle_bits n) | ||
526 | { | ||
527 | return (((unsigned int)(n >> 43)) & 0xff); | ||
528 | } | ||
529 | |||
530 | static __inline unsigned int | ||
531 | get_ImmOpcodeExtension_X0(tile_bundle_bits num) | ||
532 | { | ||
533 | const unsigned int n = (unsigned int)num; | ||
534 | return (((n >> 20)) & 0x7f); | ||
535 | } | ||
536 | |||
537 | static __inline unsigned int | ||
538 | get_ImmOpcodeExtension_X1(tile_bundle_bits n) | ||
539 | { | ||
540 | return (((unsigned int)(n >> 51)) & 0x7f); | ||
541 | } | ||
542 | |||
543 | static __inline unsigned int | ||
544 | get_ImmRROpcodeExtension_SN(tile_bundle_bits num) | ||
545 | { | ||
546 | const unsigned int n = (unsigned int)num; | ||
547 | return (((n >> 8)) & 0x3); | ||
548 | } | ||
549 | |||
550 | static __inline unsigned int | ||
551 | get_JOffLong_X1(tile_bundle_bits n) | ||
552 | { | ||
553 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
554 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
555 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
556 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
557 | (((unsigned int)(n >> 31)) & 0x18000000); | ||
558 | } | ||
559 | |||
560 | static __inline unsigned int | ||
561 | get_JOff_X1(tile_bundle_bits n) | ||
562 | { | ||
563 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
564 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
565 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
566 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
567 | (((unsigned int)(n >> 31)) & 0x08000000); | ||
568 | } | ||
569 | |||
570 | static __inline unsigned int | ||
571 | get_MF_Imm15_X1(tile_bundle_bits n) | ||
572 | { | ||
573 | return (((unsigned int)(n >> 37)) & 0x00003fff) | | ||
574 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
575 | } | ||
576 | |||
577 | static __inline unsigned int | ||
578 | get_MMEnd_X0(tile_bundle_bits num) | ||
579 | { | ||
580 | const unsigned int n = (unsigned int)num; | ||
581 | return (((n >> 18)) & 0x1f); | ||
582 | } | ||
583 | |||
584 | static __inline unsigned int | ||
585 | get_MMEnd_X1(tile_bundle_bits n) | ||
586 | { | ||
587 | return (((unsigned int)(n >> 49)) & 0x1f); | ||
588 | } | ||
589 | |||
590 | static __inline unsigned int | ||
591 | get_MMStart_X0(tile_bundle_bits num) | ||
592 | { | ||
593 | const unsigned int n = (unsigned int)num; | ||
594 | return (((n >> 23)) & 0x1f); | ||
595 | } | ||
596 | |||
597 | static __inline unsigned int | ||
598 | get_MMStart_X1(tile_bundle_bits n) | ||
599 | { | ||
600 | return (((unsigned int)(n >> 54)) & 0x1f); | ||
601 | } | ||
602 | |||
603 | static __inline unsigned int | ||
604 | get_MT_Imm15_X1(tile_bundle_bits n) | ||
605 | { | ||
606 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
607 | (((unsigned int)(n >> 37)) & 0x00003fc0) | | ||
608 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
609 | } | ||
610 | |||
611 | static __inline unsigned int | ||
612 | get_Mode(tile_bundle_bits n) | ||
613 | { | ||
614 | return (((unsigned int)(n >> 63)) & 0x1); | ||
615 | } | ||
616 | |||
617 | static __inline unsigned int | ||
618 | get_NoRegOpcodeExtension_SN(tile_bundle_bits num) | ||
619 | { | ||
620 | const unsigned int n = (unsigned int)num; | ||
621 | return (((n >> 0)) & 0xf); | ||
622 | } | ||
623 | |||
624 | static __inline unsigned int | ||
625 | get_Opcode_SN(tile_bundle_bits num) | ||
626 | { | ||
627 | const unsigned int n = (unsigned int)num; | ||
628 | return (((n >> 10)) & 0x3f); | ||
629 | } | ||
630 | |||
631 | static __inline unsigned int | ||
632 | get_Opcode_X0(tile_bundle_bits num) | ||
633 | { | ||
634 | const unsigned int n = (unsigned int)num; | ||
635 | return (((n >> 28)) & 0x7); | ||
636 | } | ||
637 | |||
638 | static __inline unsigned int | ||
639 | get_Opcode_X1(tile_bundle_bits n) | ||
640 | { | ||
641 | return (((unsigned int)(n >> 59)) & 0xf); | ||
642 | } | ||
643 | |||
644 | static __inline unsigned int | ||
645 | get_Opcode_Y0(tile_bundle_bits num) | ||
646 | { | ||
647 | const unsigned int n = (unsigned int)num; | ||
648 | return (((n >> 27)) & 0xf); | ||
649 | } | ||
650 | |||
651 | static __inline unsigned int | ||
652 | get_Opcode_Y1(tile_bundle_bits n) | ||
653 | { | ||
654 | return (((unsigned int)(n >> 59)) & 0xf); | ||
655 | } | ||
656 | |||
657 | static __inline unsigned int | ||
658 | get_Opcode_Y2(tile_bundle_bits n) | ||
659 | { | ||
660 | return (((unsigned int)(n >> 56)) & 0x7); | ||
661 | } | ||
662 | |||
663 | static __inline unsigned int | ||
664 | get_RROpcodeExtension_SN(tile_bundle_bits num) | ||
665 | { | ||
666 | const unsigned int n = (unsigned int)num; | ||
667 | return (((n >> 4)) & 0xf); | ||
668 | } | ||
669 | |||
670 | static __inline unsigned int | ||
671 | get_RRROpcodeExtension_X0(tile_bundle_bits num) | ||
672 | { | ||
673 | const unsigned int n = (unsigned int)num; | ||
674 | return (((n >> 18)) & 0x1ff); | ||
675 | } | ||
676 | |||
677 | static __inline unsigned int | ||
678 | get_RRROpcodeExtension_X1(tile_bundle_bits n) | ||
679 | { | ||
680 | return (((unsigned int)(n >> 49)) & 0x1ff); | ||
681 | } | ||
682 | |||
683 | static __inline unsigned int | ||
684 | get_RRROpcodeExtension_Y0(tile_bundle_bits num) | ||
685 | { | ||
686 | const unsigned int n = (unsigned int)num; | ||
687 | return (((n >> 18)) & 0x3); | ||
688 | } | ||
689 | |||
690 | static __inline unsigned int | ||
691 | get_RRROpcodeExtension_Y1(tile_bundle_bits n) | ||
692 | { | ||
693 | return (((unsigned int)(n >> 49)) & 0x3); | ||
694 | } | ||
695 | |||
696 | static __inline unsigned int | ||
697 | get_RouteOpcodeExtension_SN(tile_bundle_bits num) | ||
698 | { | ||
699 | const unsigned int n = (unsigned int)num; | ||
700 | return (((n >> 0)) & 0x3ff); | ||
701 | } | ||
702 | |||
703 | static __inline unsigned int | ||
704 | get_S_X0(tile_bundle_bits num) | ||
705 | { | ||
706 | const unsigned int n = (unsigned int)num; | ||
707 | return (((n >> 27)) & 0x1); | ||
708 | } | ||
709 | |||
710 | static __inline unsigned int | ||
711 | get_S_X1(tile_bundle_bits n) | ||
712 | { | ||
713 | return (((unsigned int)(n >> 58)) & 0x1); | ||
714 | } | ||
715 | |||
716 | static __inline unsigned int | ||
717 | get_ShAmt_X0(tile_bundle_bits num) | ||
718 | { | ||
719 | const unsigned int n = (unsigned int)num; | ||
720 | return (((n >> 12)) & 0x1f); | ||
721 | } | ||
722 | |||
723 | static __inline unsigned int | ||
724 | get_ShAmt_X1(tile_bundle_bits n) | ||
725 | { | ||
726 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
727 | } | ||
728 | |||
729 | static __inline unsigned int | ||
730 | get_ShAmt_Y0(tile_bundle_bits num) | ||
731 | { | ||
732 | const unsigned int n = (unsigned int)num; | ||
733 | return (((n >> 12)) & 0x1f); | ||
734 | } | ||
735 | |||
736 | static __inline unsigned int | ||
737 | get_ShAmt_Y1(tile_bundle_bits n) | ||
738 | { | ||
739 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
740 | } | ||
741 | |||
742 | static __inline unsigned int | ||
743 | get_SrcA_X0(tile_bundle_bits num) | ||
744 | { | ||
745 | const unsigned int n = (unsigned int)num; | ||
746 | return (((n >> 6)) & 0x3f); | ||
747 | } | ||
748 | |||
749 | static __inline unsigned int | ||
750 | get_SrcA_X1(tile_bundle_bits n) | ||
751 | { | ||
752 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
753 | } | ||
754 | |||
755 | static __inline unsigned int | ||
756 | get_SrcA_Y0(tile_bundle_bits num) | ||
757 | { | ||
758 | const unsigned int n = (unsigned int)num; | ||
759 | return (((n >> 6)) & 0x3f); | ||
760 | } | ||
761 | |||
762 | static __inline unsigned int | ||
763 | get_SrcA_Y1(tile_bundle_bits n) | ||
764 | { | ||
765 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
766 | } | ||
767 | |||
768 | static __inline unsigned int | ||
769 | get_SrcA_Y2(tile_bundle_bits n) | ||
770 | { | ||
771 | return (((n >> 26)) & 0x00000001) | | ||
772 | (((unsigned int)(n >> 50)) & 0x0000003e); | ||
773 | } | ||
774 | |||
775 | static __inline unsigned int | ||
776 | get_SrcBDest_Y2(tile_bundle_bits num) | ||
777 | { | ||
778 | const unsigned int n = (unsigned int)num; | ||
779 | return (((n >> 20)) & 0x3f); | ||
780 | } | ||
781 | |||
782 | static __inline unsigned int | ||
783 | get_SrcB_X0(tile_bundle_bits num) | ||
784 | { | ||
785 | const unsigned int n = (unsigned int)num; | ||
786 | return (((n >> 12)) & 0x3f); | ||
787 | } | ||
788 | |||
789 | static __inline unsigned int | ||
790 | get_SrcB_X1(tile_bundle_bits n) | ||
791 | { | ||
792 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
793 | } | ||
794 | |||
795 | static __inline unsigned int | ||
796 | get_SrcB_Y0(tile_bundle_bits num) | ||
797 | { | ||
798 | const unsigned int n = (unsigned int)num; | ||
799 | return (((n >> 12)) & 0x3f); | ||
800 | } | ||
801 | |||
802 | static __inline unsigned int | ||
803 | get_SrcB_Y1(tile_bundle_bits n) | ||
804 | { | ||
805 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
806 | } | ||
807 | |||
808 | static __inline unsigned int | ||
809 | get_Src_SN(tile_bundle_bits num) | ||
810 | { | ||
811 | const unsigned int n = (unsigned int)num; | ||
812 | return (((n >> 0)) & 0x3); | ||
813 | } | ||
814 | |||
815 | static __inline unsigned int | ||
816 | get_UnOpcodeExtension_X0(tile_bundle_bits num) | ||
817 | { | ||
818 | const unsigned int n = (unsigned int)num; | ||
819 | return (((n >> 12)) & 0x1f); | ||
820 | } | ||
821 | |||
822 | static __inline unsigned int | ||
823 | get_UnOpcodeExtension_X1(tile_bundle_bits n) | ||
824 | { | ||
825 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
826 | } | ||
827 | |||
828 | static __inline unsigned int | ||
829 | get_UnOpcodeExtension_Y0(tile_bundle_bits num) | ||
830 | { | ||
831 | const unsigned int n = (unsigned int)num; | ||
832 | return (((n >> 12)) & 0x1f); | ||
833 | } | ||
834 | |||
835 | static __inline unsigned int | ||
836 | get_UnOpcodeExtension_Y1(tile_bundle_bits n) | ||
837 | { | ||
838 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
839 | } | ||
840 | |||
841 | static __inline unsigned int | ||
842 | get_UnShOpcodeExtension_X0(tile_bundle_bits num) | ||
843 | { | ||
844 | const unsigned int n = (unsigned int)num; | ||
845 | return (((n >> 17)) & 0x3ff); | ||
846 | } | ||
847 | |||
848 | static __inline unsigned int | ||
849 | get_UnShOpcodeExtension_X1(tile_bundle_bits n) | ||
850 | { | ||
851 | return (((unsigned int)(n >> 48)) & 0x3ff); | ||
852 | } | ||
853 | |||
854 | static __inline unsigned int | ||
855 | get_UnShOpcodeExtension_Y0(tile_bundle_bits num) | ||
856 | { | ||
857 | const unsigned int n = (unsigned int)num; | ||
858 | return (((n >> 17)) & 0x7); | ||
859 | } | ||
860 | |||
861 | static __inline unsigned int | ||
862 | get_UnShOpcodeExtension_Y1(tile_bundle_bits n) | ||
863 | { | ||
864 | return (((unsigned int)(n >> 48)) & 0x7); | ||
865 | } | ||
866 | |||
867 | |||
/*
 * Sign-extend the low num_bits bits of n (valid for 1 <= num_bits <= 32).
 *
 * The previous implementation computed (n << shift) >> shift, which is
 * undefined behavior when the shifted value is negative (left shift of a
 * negative int, C11 6.5.7) and relies on the implementation-defined
 * behavior of right-shifting a negative int.  This version uses only
 * well-defined unsigned arithmetic: mask the value down to num_bits,
 * then flip and subtract the sign bit.  For inputs already confined to
 * num_bits (as produced by the get_*() field extractors) the result is
 * identical to the old code on conventional two's-complement targets.
 */
static __inline int
sign_extend(int n, int num_bits)
{
  const int int_bits = (int)(sizeof(int) * 8);
  unsigned int value, sign;

  if (num_bits >= int_bits)
    return n;  /* Nothing to extend; also avoids a shift by >= width. */

  value = (unsigned int)n & ((1U << num_bits) - 1U);
  sign = 1U << (num_bits - 1);

  /* (value ^ sign) - sign maps [sign, 2*sign) onto the negatives. */
  return (int)(value ^ sign) - (int)sign;
}
874 | |||
875 | |||
876 | |||
/*
 * Field creators.  Each create_<Field>_<Pipe>() masks a field value to
 * its encoded width and shifts it into position, returning a 64-bit
 * bundle word ready to be ORed into a bundle under construction.  These
 * are the inverses of the corresponding get_*() extractors.
 */

static __inline tile_bundle_bits
create_BrOff_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3ff) << 0);
}

static __inline tile_bundle_bits
create_BrOff_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
         (((tile_bundle_bits)(n & 0x00018000)) << 20);
}

static __inline tile_bundle_bits
create_BrType_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0xf)) << 31);
}

static __inline tile_bundle_bits
create_Dest_Imm8_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x0000003f)) << 31) |
         (((tile_bundle_bits)(n & 0x000000c0)) << 43);
}

static __inline tile_bundle_bits
create_Dest_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3) << 2);
}

static __inline tile_bundle_bits
create_Dest_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 0);
}

static __inline tile_bundle_bits
create_Dest_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3f)) << 31);
}

static __inline tile_bundle_bits
create_Dest_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 0);
}

static __inline tile_bundle_bits
create_Dest_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3f)) << 31);
}

static __inline tile_bundle_bits
create_Imm16_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0xffff) << 12);
}

static __inline tile_bundle_bits
create_Imm16_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0xffff)) << 43);
}

static __inline tile_bundle_bits
create_Imm8_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0xff) << 0);
}

static __inline tile_bundle_bits
create_Imm8_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0xff) << 12);
}

static __inline tile_bundle_bits
create_Imm8_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0xff)) << 43);
}

static __inline tile_bundle_bits
create_Imm8_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0xff) << 12);
}

static __inline tile_bundle_bits
create_Imm8_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0xff)) << 43);
}

static __inline tile_bundle_bits
create_ImmOpcodeExtension_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x7f) << 20);
}

static __inline tile_bundle_bits
create_ImmOpcodeExtension_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x7f)) << 51);
}

static __inline tile_bundle_bits
create_ImmRROpcodeExtension_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3) << 8);
}

static __inline tile_bundle_bits
create_JOffLong_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
         (((tile_bundle_bits)(n & 0x00018000)) << 20) |
         (((tile_bundle_bits)(n & 0x001e0000)) << 14) |
         (((tile_bundle_bits)(n & 0x07e00000)) << 16) |
         (((tile_bundle_bits)(n & 0x18000000)) << 31);
}

static __inline tile_bundle_bits
create_JOff_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
         (((tile_bundle_bits)(n & 0x00018000)) << 20) |
         (((tile_bundle_bits)(n & 0x001e0000)) << 14) |
         (((tile_bundle_bits)(n & 0x07e00000)) << 16) |
         (((tile_bundle_bits)(n & 0x08000000)) << 31);
}

static __inline tile_bundle_bits
create_MF_Imm15_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x00003fff)) << 37) |
         (((tile_bundle_bits)(n & 0x00004000)) << 44);
}

static __inline tile_bundle_bits
create_MMEnd_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 18);
}

static __inline tile_bundle_bits
create_MMEnd_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 49);
}

static __inline tile_bundle_bits
create_MMStart_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 23);
}

static __inline tile_bundle_bits
create_MMStart_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 54);
}

static __inline tile_bundle_bits
create_MT_Imm15_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x0000003f)) << 31) |
         (((tile_bundle_bits)(n & 0x00003fc0)) << 37) |
         (((tile_bundle_bits)(n & 0x00004000)) << 44);
}

/* Sets bit 63, the encoding-mode bit (see TILE_BUNDLE_Y_ENCODING_MASK). */
static __inline tile_bundle_bits
create_Mode(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1)) << 63);
}

static __inline tile_bundle_bits
create_NoRegOpcodeExtension_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0xf) << 0);
}

static __inline tile_bundle_bits
create_Opcode_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 10);
}

static __inline tile_bundle_bits
create_Opcode_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x7) << 28);
}

static __inline tile_bundle_bits
create_Opcode_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0xf)) << 59);
}

static __inline tile_bundle_bits
create_Opcode_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0xf) << 27);
}

static __inline tile_bundle_bits
create_Opcode_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0xf)) << 59);
}

static __inline tile_bundle_bits
create_Opcode_Y2(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x7)) << 56);
}

static __inline tile_bundle_bits
create_RROpcodeExtension_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0xf) << 4);
}

static __inline tile_bundle_bits
create_RRROpcodeExtension_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1ff) << 18);
}

static __inline tile_bundle_bits
create_RRROpcodeExtension_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1ff)) << 49);
}

static __inline tile_bundle_bits
create_RRROpcodeExtension_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3) << 18);
}

static __inline tile_bundle_bits
create_RRROpcodeExtension_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3)) << 49);
}

static __inline tile_bundle_bits
create_RouteOpcodeExtension_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3ff) << 0);
}

static __inline tile_bundle_bits
create_S_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1) << 27);
}

static __inline tile_bundle_bits
create_S_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1)) << 58);
}

static __inline tile_bundle_bits
create_ShAmt_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 12);
}

static __inline tile_bundle_bits
create_ShAmt_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 43);
}

static __inline tile_bundle_bits
create_ShAmt_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 12);
}

static __inline tile_bundle_bits
create_ShAmt_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 43);
}

static __inline tile_bundle_bits
create_SrcA_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 6);
}

static __inline tile_bundle_bits
create_SrcA_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3f)) << 37);
}

static __inline tile_bundle_bits
create_SrcA_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 6);
}

static __inline tile_bundle_bits
create_SrcA_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3f)) << 37);
}

static __inline tile_bundle_bits
create_SrcA_Y2(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x00000001) << 26) |
         (((tile_bundle_bits)(n & 0x0000003e)) << 50);
}

static __inline tile_bundle_bits
create_SrcBDest_Y2(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 20);
}

static __inline tile_bundle_bits
create_SrcB_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 12);
}

static __inline tile_bundle_bits
create_SrcB_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3f)) << 43);
}

static __inline tile_bundle_bits
create_SrcB_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 12);
}

static __inline tile_bundle_bits
create_SrcB_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3f)) << 43);
}

static __inline tile_bundle_bits
create_Src_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3) << 0);
}

static __inline tile_bundle_bits
create_UnOpcodeExtension_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 12);
}

static __inline tile_bundle_bits
create_UnOpcodeExtension_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 43);
}

static __inline tile_bundle_bits
create_UnOpcodeExtension_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 12);
}

static __inline tile_bundle_bits
create_UnOpcodeExtension_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 43);
}

static __inline tile_bundle_bits
create_UnShOpcodeExtension_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3ff) << 17);
}

static __inline tile_bundle_bits
create_UnShOpcodeExtension_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3ff)) << 48);
}

static __inline tile_bundle_bits
create_UnShOpcodeExtension_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x7) << 17);
}

static __inline tile_bundle_bits
create_UnShOpcodeExtension_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x7)) << 48);
}
1352 | |||
1353 | |||
1354 | |||
/* The five issue pipelines: X0/X1 for the X encoding, Y0-Y2 for the Y
   encoding (see TILE_BUNDLE_Y_ENCODING_MASK below). */
typedef enum
{
  TILE_PIPELINE_X0,
  TILE_PIPELINE_X1,
  TILE_PIPELINE_Y0,
  TILE_PIPELINE_Y1,
  TILE_PIPELINE_Y2,
} tile_pipeline;

/* Nonzero if p is an X pipeline; relies on X0/X1 being enumerated first. */
#define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1)

/* What kind of value an instruction operand carries. */
typedef enum
{
  TILE_OP_TYPE_REGISTER,
  TILE_OP_TYPE_IMMEDIATE,
  TILE_OP_TYPE_ADDRESS,
  TILE_OP_TYPE_SPR
} tile_operand_type;

/* This is the bit that determines if a bundle is in the Y encoding. */
#define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63)
1376 | |||
/* Sizes and limits of the TILE instruction encoding. */
enum
{
  /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
  TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3,

  /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
  TILE_NUM_PIPELINE_ENCODINGS = 5,

  /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */
  TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3,

  /* Instructions take this many bytes. */
  TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES,

  /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */
  TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,

  /* Bundles should be aligned modulo this number of bytes. */
  TILE_BUNDLE_ALIGNMENT_IN_BYTES =
    (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),

  /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */
  TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1,

  /* Static network instructions take this many bytes. */
  TILE_SN_INSTRUCTION_SIZE_IN_BYTES =
    (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES),

  /* Number of registers (some are magic, such as network I/O). */
  TILE_NUM_REGISTERS = 64,

  /* Number of static network registers. */
  TILE_NUM_SN_REGISTERS = 4
};
1411 | |||
1412 | |||
/* Describes how one kind of operand is encoded, decoded and checked. */
struct tile_operand
{
  /* Is this operand a register, immediate or address? */
  tile_operand_type type;

  /* The default relocation type for this operand.
     (NOTE(review): presumably a BFD reloc number — confirm against the
     opcode-table definitions.) */
  signed int default_reloc : 16;

  /* How many bits is this value? (used for range checking) */
  unsigned int num_bits : 5;

  /* Is the value signed? (used for range checking) */
  unsigned int is_signed : 1;

  /* Is this operand a source register? */
  unsigned int is_src_reg : 1;

  /* Is this operand written? (i.e. is it a destination register) */
  unsigned int is_dest_reg : 1;

  /* Is this operand PC-relative? */
  unsigned int is_pc_relative : 1;

  /* By how many bits do we right shift the value before inserting? */
  unsigned int rightshift : 2;

  /* Return the bits for this operand to be ORed into an existing bundle. */
  tile_bundle_bits (*insert) (int op);

  /* Extract this operand and return it. */
  unsigned int (*extract) (tile_bundle_bits bundle);
};
1445 | |||
1446 | |||
/* Table of all operand encodings, indexed by struct tile_opcode. */
extern const struct tile_operand tile_operands[];

/* One finite-state machine per pipe for rapid instruction decoding. */
extern const unsigned short * const
tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS];


/* Static description of one opcode (one row of the opcode table). */
struct tile_opcode
{
  /* The opcode mnemonic, e.g. "add" */
  const char *name;

  /* The enum value for this mnemonic. */
  tile_mnemonic mnemonic;

  /* A bit mask of which of the five pipes this instruction
     is compatible with:
     X0  0x01
     X1  0x02
     Y0  0x04
     Y1  0x08
     Y2  0x10 */
  unsigned char pipes;

  /* How many operands are there? */
  unsigned char num_operands;

  /* Which register does this write implicitly, or TREG_ZERO if none? */
  unsigned char implicitly_written_register;

  /* Can this be bundled with other instructions (almost always true). */
  unsigned char can_bundle;

  /* The description of the operands. Each of these is an
   * index into the tile_operands[] table. */
  unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS];
};

/* The full opcode table. */
extern const struct tile_opcode tile_opcodes[];
1487 | |||
1488 | |||
/* Used for non-textual disassembly into structs. */
struct tile_decoded_instruction
{
  const struct tile_opcode *opcode;
  const struct tile_operand *operands[TILE_MAX_OPERANDS];
  int operand_values[TILE_MAX_OPERANDS];
};


/* Disassemble a bundle into a struct for machine processing.  Fills
   decoded[] with at most TILE_MAX_INSTRUCTIONS_PER_BUNDLE entries.
   (NOTE(review): return-value convention not visible here — confirm
   against the implementation.) */
extern int parse_insn_tile(tile_bundle_bits bits,
                           unsigned int pc,
                           struct tile_decoded_instruction
                           decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]);
1503 | |||
1504 | |||
1505 | |||
1506 | #endif /* opcode_tile_h */ | ||
diff --git a/arch/tile/include/asm/opcode-tile_64.h b/arch/tile/include/asm/opcode-tile_64.h new file mode 100644 index 000000000000..eda60ecbae3d --- /dev/null +++ b/arch/tile/include/asm/opcode-tile_64.h | |||
@@ -0,0 +1,1506 @@ | |||
1 | /* tile.h -- Header file for TILE opcode table | ||
2 | Copyright (C) 2005 Free Software Foundation, Inc. | ||
3 | Contributed by Tilera Corp. */ | ||
4 | |||
5 | #ifndef opcode_tile_h | ||
6 | #define opcode_tile_h | ||
7 | |||
8 | typedef unsigned long long tile_bundle_bits; | ||
9 | |||
10 | |||
enum
{
  /* Upper bound on the number of operands any TILE instruction takes
     (the "mm" instruction is the widest case). */
  TILE_MAX_OPERANDS = 5 /* mm */
};
15 | |||
16 | typedef enum | ||
17 | { | ||
18 | TILE_OPC_BPT, | ||
19 | TILE_OPC_INFO, | ||
20 | TILE_OPC_INFOL, | ||
21 | TILE_OPC_J, | ||
22 | TILE_OPC_JAL, | ||
23 | TILE_OPC_MOVE, | ||
24 | TILE_OPC_MOVE_SN, | ||
25 | TILE_OPC_MOVEI, | ||
26 | TILE_OPC_MOVEI_SN, | ||
27 | TILE_OPC_MOVELI, | ||
28 | TILE_OPC_MOVELI_SN, | ||
29 | TILE_OPC_MOVELIS, | ||
30 | TILE_OPC_PREFETCH, | ||
31 | TILE_OPC_RAISE, | ||
32 | TILE_OPC_ADD, | ||
33 | TILE_OPC_ADD_SN, | ||
34 | TILE_OPC_ADDB, | ||
35 | TILE_OPC_ADDB_SN, | ||
36 | TILE_OPC_ADDBS_U, | ||
37 | TILE_OPC_ADDBS_U_SN, | ||
38 | TILE_OPC_ADDH, | ||
39 | TILE_OPC_ADDH_SN, | ||
40 | TILE_OPC_ADDHS, | ||
41 | TILE_OPC_ADDHS_SN, | ||
42 | TILE_OPC_ADDI, | ||
43 | TILE_OPC_ADDI_SN, | ||
44 | TILE_OPC_ADDIB, | ||
45 | TILE_OPC_ADDIB_SN, | ||
46 | TILE_OPC_ADDIH, | ||
47 | TILE_OPC_ADDIH_SN, | ||
48 | TILE_OPC_ADDLI, | ||
49 | TILE_OPC_ADDLI_SN, | ||
50 | TILE_OPC_ADDLIS, | ||
51 | TILE_OPC_ADDS, | ||
52 | TILE_OPC_ADDS_SN, | ||
53 | TILE_OPC_ADIFFB_U, | ||
54 | TILE_OPC_ADIFFB_U_SN, | ||
55 | TILE_OPC_ADIFFH, | ||
56 | TILE_OPC_ADIFFH_SN, | ||
57 | TILE_OPC_AND, | ||
58 | TILE_OPC_AND_SN, | ||
59 | TILE_OPC_ANDI, | ||
60 | TILE_OPC_ANDI_SN, | ||
61 | TILE_OPC_AULI, | ||
62 | TILE_OPC_AVGB_U, | ||
63 | TILE_OPC_AVGB_U_SN, | ||
64 | TILE_OPC_AVGH, | ||
65 | TILE_OPC_AVGH_SN, | ||
66 | TILE_OPC_BBNS, | ||
67 | TILE_OPC_BBNS_SN, | ||
68 | TILE_OPC_BBNST, | ||
69 | TILE_OPC_BBNST_SN, | ||
70 | TILE_OPC_BBS, | ||
71 | TILE_OPC_BBS_SN, | ||
72 | TILE_OPC_BBST, | ||
73 | TILE_OPC_BBST_SN, | ||
74 | TILE_OPC_BGEZ, | ||
75 | TILE_OPC_BGEZ_SN, | ||
76 | TILE_OPC_BGEZT, | ||
77 | TILE_OPC_BGEZT_SN, | ||
78 | TILE_OPC_BGZ, | ||
79 | TILE_OPC_BGZ_SN, | ||
80 | TILE_OPC_BGZT, | ||
81 | TILE_OPC_BGZT_SN, | ||
82 | TILE_OPC_BITX, | ||
83 | TILE_OPC_BITX_SN, | ||
84 | TILE_OPC_BLEZ, | ||
85 | TILE_OPC_BLEZ_SN, | ||
86 | TILE_OPC_BLEZT, | ||
87 | TILE_OPC_BLEZT_SN, | ||
88 | TILE_OPC_BLZ, | ||
89 | TILE_OPC_BLZ_SN, | ||
90 | TILE_OPC_BLZT, | ||
91 | TILE_OPC_BLZT_SN, | ||
92 | TILE_OPC_BNZ, | ||
93 | TILE_OPC_BNZ_SN, | ||
94 | TILE_OPC_BNZT, | ||
95 | TILE_OPC_BNZT_SN, | ||
96 | TILE_OPC_BYTEX, | ||
97 | TILE_OPC_BYTEX_SN, | ||
98 | TILE_OPC_BZ, | ||
99 | TILE_OPC_BZ_SN, | ||
100 | TILE_OPC_BZT, | ||
101 | TILE_OPC_BZT_SN, | ||
102 | TILE_OPC_CLZ, | ||
103 | TILE_OPC_CLZ_SN, | ||
104 | TILE_OPC_CRC32_32, | ||
105 | TILE_OPC_CRC32_32_SN, | ||
106 | TILE_OPC_CRC32_8, | ||
107 | TILE_OPC_CRC32_8_SN, | ||
108 | TILE_OPC_CTZ, | ||
109 | TILE_OPC_CTZ_SN, | ||
110 | TILE_OPC_DRAIN, | ||
111 | TILE_OPC_DTLBPR, | ||
112 | TILE_OPC_DWORD_ALIGN, | ||
113 | TILE_OPC_DWORD_ALIGN_SN, | ||
114 | TILE_OPC_FINV, | ||
115 | TILE_OPC_FLUSH, | ||
116 | TILE_OPC_FNOP, | ||
117 | TILE_OPC_ICOH, | ||
118 | TILE_OPC_ILL, | ||
119 | TILE_OPC_INTHB, | ||
120 | TILE_OPC_INTHB_SN, | ||
121 | TILE_OPC_INTHH, | ||
122 | TILE_OPC_INTHH_SN, | ||
123 | TILE_OPC_INTLB, | ||
124 | TILE_OPC_INTLB_SN, | ||
125 | TILE_OPC_INTLH, | ||
126 | TILE_OPC_INTLH_SN, | ||
127 | TILE_OPC_INV, | ||
128 | TILE_OPC_IRET, | ||
129 | TILE_OPC_JALB, | ||
130 | TILE_OPC_JALF, | ||
131 | TILE_OPC_JALR, | ||
132 | TILE_OPC_JALRP, | ||
133 | TILE_OPC_JB, | ||
134 | TILE_OPC_JF, | ||
135 | TILE_OPC_JR, | ||
136 | TILE_OPC_JRP, | ||
137 | TILE_OPC_LB, | ||
138 | TILE_OPC_LB_SN, | ||
139 | TILE_OPC_LB_U, | ||
140 | TILE_OPC_LB_U_SN, | ||
141 | TILE_OPC_LBADD, | ||
142 | TILE_OPC_LBADD_SN, | ||
143 | TILE_OPC_LBADD_U, | ||
144 | TILE_OPC_LBADD_U_SN, | ||
145 | TILE_OPC_LH, | ||
146 | TILE_OPC_LH_SN, | ||
147 | TILE_OPC_LH_U, | ||
148 | TILE_OPC_LH_U_SN, | ||
149 | TILE_OPC_LHADD, | ||
150 | TILE_OPC_LHADD_SN, | ||
151 | TILE_OPC_LHADD_U, | ||
152 | TILE_OPC_LHADD_U_SN, | ||
153 | TILE_OPC_LNK, | ||
154 | TILE_OPC_LNK_SN, | ||
155 | TILE_OPC_LW, | ||
156 | TILE_OPC_LW_SN, | ||
157 | TILE_OPC_LW_NA, | ||
158 | TILE_OPC_LW_NA_SN, | ||
159 | TILE_OPC_LWADD, | ||
160 | TILE_OPC_LWADD_SN, | ||
161 | TILE_OPC_LWADD_NA, | ||
162 | TILE_OPC_LWADD_NA_SN, | ||
163 | TILE_OPC_MAXB_U, | ||
164 | TILE_OPC_MAXB_U_SN, | ||
165 | TILE_OPC_MAXH, | ||
166 | TILE_OPC_MAXH_SN, | ||
167 | TILE_OPC_MAXIB_U, | ||
168 | TILE_OPC_MAXIB_U_SN, | ||
169 | TILE_OPC_MAXIH, | ||
170 | TILE_OPC_MAXIH_SN, | ||
171 | TILE_OPC_MF, | ||
172 | TILE_OPC_MFSPR, | ||
173 | TILE_OPC_MINB_U, | ||
174 | TILE_OPC_MINB_U_SN, | ||
175 | TILE_OPC_MINH, | ||
176 | TILE_OPC_MINH_SN, | ||
177 | TILE_OPC_MINIB_U, | ||
178 | TILE_OPC_MINIB_U_SN, | ||
179 | TILE_OPC_MINIH, | ||
180 | TILE_OPC_MINIH_SN, | ||
181 | TILE_OPC_MM, | ||
182 | TILE_OPC_MNZ, | ||
183 | TILE_OPC_MNZ_SN, | ||
184 | TILE_OPC_MNZB, | ||
185 | TILE_OPC_MNZB_SN, | ||
186 | TILE_OPC_MNZH, | ||
187 | TILE_OPC_MNZH_SN, | ||
188 | TILE_OPC_MTSPR, | ||
189 | TILE_OPC_MULHH_SS, | ||
190 | TILE_OPC_MULHH_SS_SN, | ||
191 | TILE_OPC_MULHH_SU, | ||
192 | TILE_OPC_MULHH_SU_SN, | ||
193 | TILE_OPC_MULHH_UU, | ||
194 | TILE_OPC_MULHH_UU_SN, | ||
195 | TILE_OPC_MULHHA_SS, | ||
196 | TILE_OPC_MULHHA_SS_SN, | ||
197 | TILE_OPC_MULHHA_SU, | ||
198 | TILE_OPC_MULHHA_SU_SN, | ||
199 | TILE_OPC_MULHHA_UU, | ||
200 | TILE_OPC_MULHHA_UU_SN, | ||
201 | TILE_OPC_MULHHSA_UU, | ||
202 | TILE_OPC_MULHHSA_UU_SN, | ||
203 | TILE_OPC_MULHL_SS, | ||
204 | TILE_OPC_MULHL_SS_SN, | ||
205 | TILE_OPC_MULHL_SU, | ||
206 | TILE_OPC_MULHL_SU_SN, | ||
207 | TILE_OPC_MULHL_US, | ||
208 | TILE_OPC_MULHL_US_SN, | ||
209 | TILE_OPC_MULHL_UU, | ||
210 | TILE_OPC_MULHL_UU_SN, | ||
211 | TILE_OPC_MULHLA_SS, | ||
212 | TILE_OPC_MULHLA_SS_SN, | ||
213 | TILE_OPC_MULHLA_SU, | ||
214 | TILE_OPC_MULHLA_SU_SN, | ||
215 | TILE_OPC_MULHLA_US, | ||
216 | TILE_OPC_MULHLA_US_SN, | ||
217 | TILE_OPC_MULHLA_UU, | ||
218 | TILE_OPC_MULHLA_UU_SN, | ||
219 | TILE_OPC_MULHLSA_UU, | ||
220 | TILE_OPC_MULHLSA_UU_SN, | ||
221 | TILE_OPC_MULLL_SS, | ||
222 | TILE_OPC_MULLL_SS_SN, | ||
223 | TILE_OPC_MULLL_SU, | ||
224 | TILE_OPC_MULLL_SU_SN, | ||
225 | TILE_OPC_MULLL_UU, | ||
226 | TILE_OPC_MULLL_UU_SN, | ||
227 | TILE_OPC_MULLLA_SS, | ||
228 | TILE_OPC_MULLLA_SS_SN, | ||
229 | TILE_OPC_MULLLA_SU, | ||
230 | TILE_OPC_MULLLA_SU_SN, | ||
231 | TILE_OPC_MULLLA_UU, | ||
232 | TILE_OPC_MULLLA_UU_SN, | ||
233 | TILE_OPC_MULLLSA_UU, | ||
234 | TILE_OPC_MULLLSA_UU_SN, | ||
235 | TILE_OPC_MVNZ, | ||
236 | TILE_OPC_MVNZ_SN, | ||
237 | TILE_OPC_MVZ, | ||
238 | TILE_OPC_MVZ_SN, | ||
239 | TILE_OPC_MZ, | ||
240 | TILE_OPC_MZ_SN, | ||
241 | TILE_OPC_MZB, | ||
242 | TILE_OPC_MZB_SN, | ||
243 | TILE_OPC_MZH, | ||
244 | TILE_OPC_MZH_SN, | ||
245 | TILE_OPC_NAP, | ||
246 | TILE_OPC_NOP, | ||
247 | TILE_OPC_NOR, | ||
248 | TILE_OPC_NOR_SN, | ||
249 | TILE_OPC_OR, | ||
250 | TILE_OPC_OR_SN, | ||
251 | TILE_OPC_ORI, | ||
252 | TILE_OPC_ORI_SN, | ||
253 | TILE_OPC_PACKBS_U, | ||
254 | TILE_OPC_PACKBS_U_SN, | ||
255 | TILE_OPC_PACKHB, | ||
256 | TILE_OPC_PACKHB_SN, | ||
257 | TILE_OPC_PACKHS, | ||
258 | TILE_OPC_PACKHS_SN, | ||
259 | TILE_OPC_PACKLB, | ||
260 | TILE_OPC_PACKLB_SN, | ||
261 | TILE_OPC_PCNT, | ||
262 | TILE_OPC_PCNT_SN, | ||
263 | TILE_OPC_RL, | ||
264 | TILE_OPC_RL_SN, | ||
265 | TILE_OPC_RLI, | ||
266 | TILE_OPC_RLI_SN, | ||
267 | TILE_OPC_S1A, | ||
268 | TILE_OPC_S1A_SN, | ||
269 | TILE_OPC_S2A, | ||
270 | TILE_OPC_S2A_SN, | ||
271 | TILE_OPC_S3A, | ||
272 | TILE_OPC_S3A_SN, | ||
273 | TILE_OPC_SADAB_U, | ||
274 | TILE_OPC_SADAB_U_SN, | ||
275 | TILE_OPC_SADAH, | ||
276 | TILE_OPC_SADAH_SN, | ||
277 | TILE_OPC_SADAH_U, | ||
278 | TILE_OPC_SADAH_U_SN, | ||
279 | TILE_OPC_SADB_U, | ||
280 | TILE_OPC_SADB_U_SN, | ||
281 | TILE_OPC_SADH, | ||
282 | TILE_OPC_SADH_SN, | ||
283 | TILE_OPC_SADH_U, | ||
284 | TILE_OPC_SADH_U_SN, | ||
285 | TILE_OPC_SB, | ||
286 | TILE_OPC_SBADD, | ||
287 | TILE_OPC_SEQ, | ||
288 | TILE_OPC_SEQ_SN, | ||
289 | TILE_OPC_SEQB, | ||
290 | TILE_OPC_SEQB_SN, | ||
291 | TILE_OPC_SEQH, | ||
292 | TILE_OPC_SEQH_SN, | ||
293 | TILE_OPC_SEQI, | ||
294 | TILE_OPC_SEQI_SN, | ||
295 | TILE_OPC_SEQIB, | ||
296 | TILE_OPC_SEQIB_SN, | ||
297 | TILE_OPC_SEQIH, | ||
298 | TILE_OPC_SEQIH_SN, | ||
299 | TILE_OPC_SH, | ||
300 | TILE_OPC_SHADD, | ||
301 | TILE_OPC_SHL, | ||
302 | TILE_OPC_SHL_SN, | ||
303 | TILE_OPC_SHLB, | ||
304 | TILE_OPC_SHLB_SN, | ||
305 | TILE_OPC_SHLH, | ||
306 | TILE_OPC_SHLH_SN, | ||
307 | TILE_OPC_SHLI, | ||
308 | TILE_OPC_SHLI_SN, | ||
309 | TILE_OPC_SHLIB, | ||
310 | TILE_OPC_SHLIB_SN, | ||
311 | TILE_OPC_SHLIH, | ||
312 | TILE_OPC_SHLIH_SN, | ||
313 | TILE_OPC_SHR, | ||
314 | TILE_OPC_SHR_SN, | ||
315 | TILE_OPC_SHRB, | ||
316 | TILE_OPC_SHRB_SN, | ||
317 | TILE_OPC_SHRH, | ||
318 | TILE_OPC_SHRH_SN, | ||
319 | TILE_OPC_SHRI, | ||
320 | TILE_OPC_SHRI_SN, | ||
321 | TILE_OPC_SHRIB, | ||
322 | TILE_OPC_SHRIB_SN, | ||
323 | TILE_OPC_SHRIH, | ||
324 | TILE_OPC_SHRIH_SN, | ||
325 | TILE_OPC_SLT, | ||
326 | TILE_OPC_SLT_SN, | ||
327 | TILE_OPC_SLT_U, | ||
328 | TILE_OPC_SLT_U_SN, | ||
329 | TILE_OPC_SLTB, | ||
330 | TILE_OPC_SLTB_SN, | ||
331 | TILE_OPC_SLTB_U, | ||
332 | TILE_OPC_SLTB_U_SN, | ||
333 | TILE_OPC_SLTE, | ||
334 | TILE_OPC_SLTE_SN, | ||
335 | TILE_OPC_SLTE_U, | ||
336 | TILE_OPC_SLTE_U_SN, | ||
337 | TILE_OPC_SLTEB, | ||
338 | TILE_OPC_SLTEB_SN, | ||
339 | TILE_OPC_SLTEB_U, | ||
340 | TILE_OPC_SLTEB_U_SN, | ||
341 | TILE_OPC_SLTEH, | ||
342 | TILE_OPC_SLTEH_SN, | ||
343 | TILE_OPC_SLTEH_U, | ||
344 | TILE_OPC_SLTEH_U_SN, | ||
345 | TILE_OPC_SLTH, | ||
346 | TILE_OPC_SLTH_SN, | ||
347 | TILE_OPC_SLTH_U, | ||
348 | TILE_OPC_SLTH_U_SN, | ||
349 | TILE_OPC_SLTI, | ||
350 | TILE_OPC_SLTI_SN, | ||
351 | TILE_OPC_SLTI_U, | ||
352 | TILE_OPC_SLTI_U_SN, | ||
353 | TILE_OPC_SLTIB, | ||
354 | TILE_OPC_SLTIB_SN, | ||
355 | TILE_OPC_SLTIB_U, | ||
356 | TILE_OPC_SLTIB_U_SN, | ||
357 | TILE_OPC_SLTIH, | ||
358 | TILE_OPC_SLTIH_SN, | ||
359 | TILE_OPC_SLTIH_U, | ||
360 | TILE_OPC_SLTIH_U_SN, | ||
361 | TILE_OPC_SNE, | ||
362 | TILE_OPC_SNE_SN, | ||
363 | TILE_OPC_SNEB, | ||
364 | TILE_OPC_SNEB_SN, | ||
365 | TILE_OPC_SNEH, | ||
366 | TILE_OPC_SNEH_SN, | ||
367 | TILE_OPC_SRA, | ||
368 | TILE_OPC_SRA_SN, | ||
369 | TILE_OPC_SRAB, | ||
370 | TILE_OPC_SRAB_SN, | ||
371 | TILE_OPC_SRAH, | ||
372 | TILE_OPC_SRAH_SN, | ||
373 | TILE_OPC_SRAI, | ||
374 | TILE_OPC_SRAI_SN, | ||
375 | TILE_OPC_SRAIB, | ||
376 | TILE_OPC_SRAIB_SN, | ||
377 | TILE_OPC_SRAIH, | ||
378 | TILE_OPC_SRAIH_SN, | ||
379 | TILE_OPC_SUB, | ||
380 | TILE_OPC_SUB_SN, | ||
381 | TILE_OPC_SUBB, | ||
382 | TILE_OPC_SUBB_SN, | ||
383 | TILE_OPC_SUBBS_U, | ||
384 | TILE_OPC_SUBBS_U_SN, | ||
385 | TILE_OPC_SUBH, | ||
386 | TILE_OPC_SUBH_SN, | ||
387 | TILE_OPC_SUBHS, | ||
388 | TILE_OPC_SUBHS_SN, | ||
389 | TILE_OPC_SUBS, | ||
390 | TILE_OPC_SUBS_SN, | ||
391 | TILE_OPC_SW, | ||
392 | TILE_OPC_SWADD, | ||
393 | TILE_OPC_SWINT0, | ||
394 | TILE_OPC_SWINT1, | ||
395 | TILE_OPC_SWINT2, | ||
396 | TILE_OPC_SWINT3, | ||
397 | TILE_OPC_TBLIDXB0, | ||
398 | TILE_OPC_TBLIDXB0_SN, | ||
399 | TILE_OPC_TBLIDXB1, | ||
400 | TILE_OPC_TBLIDXB1_SN, | ||
401 | TILE_OPC_TBLIDXB2, | ||
402 | TILE_OPC_TBLIDXB2_SN, | ||
403 | TILE_OPC_TBLIDXB3, | ||
404 | TILE_OPC_TBLIDXB3_SN, | ||
405 | TILE_OPC_TNS, | ||
406 | TILE_OPC_TNS_SN, | ||
407 | TILE_OPC_WH64, | ||
408 | TILE_OPC_XOR, | ||
409 | TILE_OPC_XOR_SN, | ||
410 | TILE_OPC_XORI, | ||
411 | TILE_OPC_XORI_SN, | ||
412 | TILE_OPC_NONE | ||
413 | } tile_mnemonic; | ||
414 | |||
415 | /* 64-bit pattern for a { bpt ; nop } bundle. */ | ||
416 | #define TILE_BPT_BUNDLE 0x400b3cae70166000ULL | ||
417 | |||
418 | |||
419 | #define TILE_ELF_MACHINE_CODE EM_TILEPRO | ||
420 | |||
421 | #define TILE_ELF_NAME "elf32-tilepro" | ||
422 | |||
423 | |||
424 | static __inline unsigned int | ||
425 | get_BrOff_SN(tile_bundle_bits num) | ||
426 | { | ||
427 | const unsigned int n = (unsigned int)num; | ||
428 | return (((n >> 0)) & 0x3ff); | ||
429 | } | ||
430 | |||
431 | static __inline unsigned int | ||
432 | get_BrOff_X1(tile_bundle_bits n) | ||
433 | { | ||
434 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
435 | (((unsigned int)(n >> 20)) & 0x00018000); | ||
436 | } | ||
437 | |||
438 | static __inline unsigned int | ||
439 | get_BrType_X1(tile_bundle_bits n) | ||
440 | { | ||
441 | return (((unsigned int)(n >> 31)) & 0xf); | ||
442 | } | ||
443 | |||
/* Extract the split 8-bit Dest_Imm8 field of the X1 slot: the low 6
   bits come from bundle bits [31..36], the high 2 bits from [43..44]. */
static __inline unsigned int
get_Dest_Imm8_X1(tile_bundle_bits n)
{
  return (((unsigned int)(n >> 31)) & 0x0000003f) |
         (((unsigned int)(n >> 43)) & 0x000000c0);
}
450 | |||
451 | static __inline unsigned int | ||
452 | get_Dest_SN(tile_bundle_bits num) | ||
453 | { | ||
454 | const unsigned int n = (unsigned int)num; | ||
455 | return (((n >> 2)) & 0x3); | ||
456 | } | ||
457 | |||
458 | static __inline unsigned int | ||
459 | get_Dest_X0(tile_bundle_bits num) | ||
460 | { | ||
461 | const unsigned int n = (unsigned int)num; | ||
462 | return (((n >> 0)) & 0x3f); | ||
463 | } | ||
464 | |||
465 | static __inline unsigned int | ||
466 | get_Dest_X1(tile_bundle_bits n) | ||
467 | { | ||
468 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
469 | } | ||
470 | |||
471 | static __inline unsigned int | ||
472 | get_Dest_Y0(tile_bundle_bits num) | ||
473 | { | ||
474 | const unsigned int n = (unsigned int)num; | ||
475 | return (((n >> 0)) & 0x3f); | ||
476 | } | ||
477 | |||
478 | static __inline unsigned int | ||
479 | get_Dest_Y1(tile_bundle_bits n) | ||
480 | { | ||
481 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
482 | } | ||
483 | |||
484 | static __inline unsigned int | ||
485 | get_Imm16_X0(tile_bundle_bits num) | ||
486 | { | ||
487 | const unsigned int n = (unsigned int)num; | ||
488 | return (((n >> 12)) & 0xffff); | ||
489 | } | ||
490 | |||
491 | static __inline unsigned int | ||
492 | get_Imm16_X1(tile_bundle_bits n) | ||
493 | { | ||
494 | return (((unsigned int)(n >> 43)) & 0xffff); | ||
495 | } | ||
496 | |||
497 | static __inline unsigned int | ||
498 | get_Imm8_SN(tile_bundle_bits num) | ||
499 | { | ||
500 | const unsigned int n = (unsigned int)num; | ||
501 | return (((n >> 0)) & 0xff); | ||
502 | } | ||
503 | |||
504 | static __inline unsigned int | ||
505 | get_Imm8_X0(tile_bundle_bits num) | ||
506 | { | ||
507 | const unsigned int n = (unsigned int)num; | ||
508 | return (((n >> 12)) & 0xff); | ||
509 | } | ||
510 | |||
511 | static __inline unsigned int | ||
512 | get_Imm8_X1(tile_bundle_bits n) | ||
513 | { | ||
514 | return (((unsigned int)(n >> 43)) & 0xff); | ||
515 | } | ||
516 | |||
517 | static __inline unsigned int | ||
518 | get_Imm8_Y0(tile_bundle_bits num) | ||
519 | { | ||
520 | const unsigned int n = (unsigned int)num; | ||
521 | return (((n >> 12)) & 0xff); | ||
522 | } | ||
523 | |||
524 | static __inline unsigned int | ||
525 | get_Imm8_Y1(tile_bundle_bits n) | ||
526 | { | ||
527 | return (((unsigned int)(n >> 43)) & 0xff); | ||
528 | } | ||
529 | |||
530 | static __inline unsigned int | ||
531 | get_ImmOpcodeExtension_X0(tile_bundle_bits num) | ||
532 | { | ||
533 | const unsigned int n = (unsigned int)num; | ||
534 | return (((n >> 20)) & 0x7f); | ||
535 | } | ||
536 | |||
537 | static __inline unsigned int | ||
538 | get_ImmOpcodeExtension_X1(tile_bundle_bits n) | ||
539 | { | ||
540 | return (((unsigned int)(n >> 51)) & 0x7f); | ||
541 | } | ||
542 | |||
543 | static __inline unsigned int | ||
544 | get_ImmRROpcodeExtension_SN(tile_bundle_bits num) | ||
545 | { | ||
546 | const unsigned int n = (unsigned int)num; | ||
547 | return (((n >> 8)) & 0x3); | ||
548 | } | ||
549 | |||
550 | static __inline unsigned int | ||
551 | get_JOffLong_X1(tile_bundle_bits n) | ||
552 | { | ||
553 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
554 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
555 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
556 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
557 | (((unsigned int)(n >> 31)) & 0x18000000); | ||
558 | } | ||
559 | |||
560 | static __inline unsigned int | ||
561 | get_JOff_X1(tile_bundle_bits n) | ||
562 | { | ||
563 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
564 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
565 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
566 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
567 | (((unsigned int)(n >> 31)) & 0x08000000); | ||
568 | } | ||
569 | |||
570 | static __inline unsigned int | ||
571 | get_MF_Imm15_X1(tile_bundle_bits n) | ||
572 | { | ||
573 | return (((unsigned int)(n >> 37)) & 0x00003fff) | | ||
574 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
575 | } | ||
576 | |||
577 | static __inline unsigned int | ||
578 | get_MMEnd_X0(tile_bundle_bits num) | ||
579 | { | ||
580 | const unsigned int n = (unsigned int)num; | ||
581 | return (((n >> 18)) & 0x1f); | ||
582 | } | ||
583 | |||
584 | static __inline unsigned int | ||
585 | get_MMEnd_X1(tile_bundle_bits n) | ||
586 | { | ||
587 | return (((unsigned int)(n >> 49)) & 0x1f); | ||
588 | } | ||
589 | |||
590 | static __inline unsigned int | ||
591 | get_MMStart_X0(tile_bundle_bits num) | ||
592 | { | ||
593 | const unsigned int n = (unsigned int)num; | ||
594 | return (((n >> 23)) & 0x1f); | ||
595 | } | ||
596 | |||
597 | static __inline unsigned int | ||
598 | get_MMStart_X1(tile_bundle_bits n) | ||
599 | { | ||
600 | return (((unsigned int)(n >> 54)) & 0x1f); | ||
601 | } | ||
602 | |||
603 | static __inline unsigned int | ||
604 | get_MT_Imm15_X1(tile_bundle_bits n) | ||
605 | { | ||
606 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
607 | (((unsigned int)(n >> 37)) & 0x00003fc0) | | ||
608 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
609 | } | ||
610 | |||
611 | static __inline unsigned int | ||
612 | get_Mode(tile_bundle_bits n) | ||
613 | { | ||
614 | return (((unsigned int)(n >> 63)) & 0x1); | ||
615 | } | ||
616 | |||
617 | static __inline unsigned int | ||
618 | get_NoRegOpcodeExtension_SN(tile_bundle_bits num) | ||
619 | { | ||
620 | const unsigned int n = (unsigned int)num; | ||
621 | return (((n >> 0)) & 0xf); | ||
622 | } | ||
623 | |||
624 | static __inline unsigned int | ||
625 | get_Opcode_SN(tile_bundle_bits num) | ||
626 | { | ||
627 | const unsigned int n = (unsigned int)num; | ||
628 | return (((n >> 10)) & 0x3f); | ||
629 | } | ||
630 | |||
631 | static __inline unsigned int | ||
632 | get_Opcode_X0(tile_bundle_bits num) | ||
633 | { | ||
634 | const unsigned int n = (unsigned int)num; | ||
635 | return (((n >> 28)) & 0x7); | ||
636 | } | ||
637 | |||
638 | static __inline unsigned int | ||
639 | get_Opcode_X1(tile_bundle_bits n) | ||
640 | { | ||
641 | return (((unsigned int)(n >> 59)) & 0xf); | ||
642 | } | ||
643 | |||
644 | static __inline unsigned int | ||
645 | get_Opcode_Y0(tile_bundle_bits num) | ||
646 | { | ||
647 | const unsigned int n = (unsigned int)num; | ||
648 | return (((n >> 27)) & 0xf); | ||
649 | } | ||
650 | |||
651 | static __inline unsigned int | ||
652 | get_Opcode_Y1(tile_bundle_bits n) | ||
653 | { | ||
654 | return (((unsigned int)(n >> 59)) & 0xf); | ||
655 | } | ||
656 | |||
657 | static __inline unsigned int | ||
658 | get_Opcode_Y2(tile_bundle_bits n) | ||
659 | { | ||
660 | return (((unsigned int)(n >> 56)) & 0x7); | ||
661 | } | ||
662 | |||
663 | static __inline unsigned int | ||
664 | get_RROpcodeExtension_SN(tile_bundle_bits num) | ||
665 | { | ||
666 | const unsigned int n = (unsigned int)num; | ||
667 | return (((n >> 4)) & 0xf); | ||
668 | } | ||
669 | |||
670 | static __inline unsigned int | ||
671 | get_RRROpcodeExtension_X0(tile_bundle_bits num) | ||
672 | { | ||
673 | const unsigned int n = (unsigned int)num; | ||
674 | return (((n >> 18)) & 0x1ff); | ||
675 | } | ||
676 | |||
677 | static __inline unsigned int | ||
678 | get_RRROpcodeExtension_X1(tile_bundle_bits n) | ||
679 | { | ||
680 | return (((unsigned int)(n >> 49)) & 0x1ff); | ||
681 | } | ||
682 | |||
683 | static __inline unsigned int | ||
684 | get_RRROpcodeExtension_Y0(tile_bundle_bits num) | ||
685 | { | ||
686 | const unsigned int n = (unsigned int)num; | ||
687 | return (((n >> 18)) & 0x3); | ||
688 | } | ||
689 | |||
690 | static __inline unsigned int | ||
691 | get_RRROpcodeExtension_Y1(tile_bundle_bits n) | ||
692 | { | ||
693 | return (((unsigned int)(n >> 49)) & 0x3); | ||
694 | } | ||
695 | |||
696 | static __inline unsigned int | ||
697 | get_RouteOpcodeExtension_SN(tile_bundle_bits num) | ||
698 | { | ||
699 | const unsigned int n = (unsigned int)num; | ||
700 | return (((n >> 0)) & 0x3ff); | ||
701 | } | ||
702 | |||
703 | static __inline unsigned int | ||
704 | get_S_X0(tile_bundle_bits num) | ||
705 | { | ||
706 | const unsigned int n = (unsigned int)num; | ||
707 | return (((n >> 27)) & 0x1); | ||
708 | } | ||
709 | |||
710 | static __inline unsigned int | ||
711 | get_S_X1(tile_bundle_bits n) | ||
712 | { | ||
713 | return (((unsigned int)(n >> 58)) & 0x1); | ||
714 | } | ||
715 | |||
716 | static __inline unsigned int | ||
717 | get_ShAmt_X0(tile_bundle_bits num) | ||
718 | { | ||
719 | const unsigned int n = (unsigned int)num; | ||
720 | return (((n >> 12)) & 0x1f); | ||
721 | } | ||
722 | |||
723 | static __inline unsigned int | ||
724 | get_ShAmt_X1(tile_bundle_bits n) | ||
725 | { | ||
726 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
727 | } | ||
728 | |||
729 | static __inline unsigned int | ||
730 | get_ShAmt_Y0(tile_bundle_bits num) | ||
731 | { | ||
732 | const unsigned int n = (unsigned int)num; | ||
733 | return (((n >> 12)) & 0x1f); | ||
734 | } | ||
735 | |||
736 | static __inline unsigned int | ||
737 | get_ShAmt_Y1(tile_bundle_bits n) | ||
738 | { | ||
739 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
740 | } | ||
741 | |||
742 | static __inline unsigned int | ||
743 | get_SrcA_X0(tile_bundle_bits num) | ||
744 | { | ||
745 | const unsigned int n = (unsigned int)num; | ||
746 | return (((n >> 6)) & 0x3f); | ||
747 | } | ||
748 | |||
749 | static __inline unsigned int | ||
750 | get_SrcA_X1(tile_bundle_bits n) | ||
751 | { | ||
752 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
753 | } | ||
754 | |||
755 | static __inline unsigned int | ||
756 | get_SrcA_Y0(tile_bundle_bits num) | ||
757 | { | ||
758 | const unsigned int n = (unsigned int)num; | ||
759 | return (((n >> 6)) & 0x3f); | ||
760 | } | ||
761 | |||
762 | static __inline unsigned int | ||
763 | get_SrcA_Y1(tile_bundle_bits n) | ||
764 | { | ||
765 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
766 | } | ||
767 | |||
768 | static __inline unsigned int | ||
769 | get_SrcA_Y2(tile_bundle_bits n) | ||
770 | { | ||
771 | return (((n >> 26)) & 0x00000001) | | ||
772 | (((unsigned int)(n >> 50)) & 0x0000003e); | ||
773 | } | ||
774 | |||
775 | static __inline unsigned int | ||
776 | get_SrcBDest_Y2(tile_bundle_bits num) | ||
777 | { | ||
778 | const unsigned int n = (unsigned int)num; | ||
779 | return (((n >> 20)) & 0x3f); | ||
780 | } | ||
781 | |||
782 | static __inline unsigned int | ||
783 | get_SrcB_X0(tile_bundle_bits num) | ||
784 | { | ||
785 | const unsigned int n = (unsigned int)num; | ||
786 | return (((n >> 12)) & 0x3f); | ||
787 | } | ||
788 | |||
789 | static __inline unsigned int | ||
790 | get_SrcB_X1(tile_bundle_bits n) | ||
791 | { | ||
792 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
793 | } | ||
794 | |||
795 | static __inline unsigned int | ||
796 | get_SrcB_Y0(tile_bundle_bits num) | ||
797 | { | ||
798 | const unsigned int n = (unsigned int)num; | ||
799 | return (((n >> 12)) & 0x3f); | ||
800 | } | ||
801 | |||
802 | static __inline unsigned int | ||
803 | get_SrcB_Y1(tile_bundle_bits n) | ||
804 | { | ||
805 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
806 | } | ||
807 | |||
808 | static __inline unsigned int | ||
809 | get_Src_SN(tile_bundle_bits num) | ||
810 | { | ||
811 | const unsigned int n = (unsigned int)num; | ||
812 | return (((n >> 0)) & 0x3); | ||
813 | } | ||
814 | |||
815 | static __inline unsigned int | ||
816 | get_UnOpcodeExtension_X0(tile_bundle_bits num) | ||
817 | { | ||
818 | const unsigned int n = (unsigned int)num; | ||
819 | return (((n >> 12)) & 0x1f); | ||
820 | } | ||
821 | |||
822 | static __inline unsigned int | ||
823 | get_UnOpcodeExtension_X1(tile_bundle_bits n) | ||
824 | { | ||
825 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
826 | } | ||
827 | |||
828 | static __inline unsigned int | ||
829 | get_UnOpcodeExtension_Y0(tile_bundle_bits num) | ||
830 | { | ||
831 | const unsigned int n = (unsigned int)num; | ||
832 | return (((n >> 12)) & 0x1f); | ||
833 | } | ||
834 | |||
835 | static __inline unsigned int | ||
836 | get_UnOpcodeExtension_Y1(tile_bundle_bits n) | ||
837 | { | ||
838 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
839 | } | ||
840 | |||
841 | static __inline unsigned int | ||
842 | get_UnShOpcodeExtension_X0(tile_bundle_bits num) | ||
843 | { | ||
844 | const unsigned int n = (unsigned int)num; | ||
845 | return (((n >> 17)) & 0x3ff); | ||
846 | } | ||
847 | |||
848 | static __inline unsigned int | ||
849 | get_UnShOpcodeExtension_X1(tile_bundle_bits n) | ||
850 | { | ||
851 | return (((unsigned int)(n >> 48)) & 0x3ff); | ||
852 | } | ||
853 | |||
854 | static __inline unsigned int | ||
855 | get_UnShOpcodeExtension_Y0(tile_bundle_bits num) | ||
856 | { | ||
857 | const unsigned int n = (unsigned int)num; | ||
858 | return (((n >> 17)) & 0x7); | ||
859 | } | ||
860 | |||
861 | static __inline unsigned int | ||
862 | get_UnShOpcodeExtension_Y1(tile_bundle_bits n) | ||
863 | { | ||
864 | return (((unsigned int)(n >> 48)) & 0x7); | ||
865 | } | ||
866 | |||
867 | |||
/*
 * Sign-extend the low num_bits bits of n to a full int.
 *
 * The previous form ((n << shift) >> shift) left-shifted a signed
 * value, which is undefined behavior in C when n is negative or when a
 * 1 bit is shifted into the sign position (C11 6.5.7); the right shift
 * of a negative value is also only implementation-defined.  Do the work
 * in unsigned arithmetic instead: mask to num_bits, then use the
 * (x ^ sign) - sign idiom, which yields the identical result for every
 * num_bits in [1, 32] on two's-complement targets without UB.
 */
static __inline int
sign_extend(int n, int num_bits)
{
  const unsigned int bits = (unsigned int)num_bits;
  const unsigned int sign = 1U << (bits - 1);
  const unsigned int mask =
    (bits >= sizeof(int) * 8) ? ~0U : ((1U << bits) - 1U);
  const unsigned int low = (unsigned int)n & mask;
  return (int)((low ^ sign) - sign);
}
874 | |||
875 | |||
876 | |||
877 | static __inline tile_bundle_bits | ||
878 | create_BrOff_SN(int num) | ||
879 | { | ||
880 | const unsigned int n = (unsigned int)num; | ||
881 | return ((n & 0x3ff) << 0); | ||
882 | } | ||
883 | |||
/* Encode a 17-bit X1 branch offset into its bundle position: the low
   15 bits land at bundle bits [43..57], bits 15..16 at [35..36].
   Inverse of get_BrOff_X1(). */
static __inline tile_bundle_bits
create_BrOff_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
         (((tile_bundle_bits)(n & 0x00018000)) << 20);
}
891 | |||
892 | static __inline tile_bundle_bits | ||
893 | create_BrType_X1(int num) | ||
894 | { | ||
895 | const unsigned int n = (unsigned int)num; | ||
896 | return (((tile_bundle_bits)(n & 0xf)) << 31); | ||
897 | } | ||
898 | |||
899 | static __inline tile_bundle_bits | ||
900 | create_Dest_Imm8_X1(int num) | ||
901 | { | ||
902 | const unsigned int n = (unsigned int)num; | ||
903 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
904 | (((tile_bundle_bits)(n & 0x000000c0)) << 43); | ||
905 | } | ||
906 | |||
907 | static __inline tile_bundle_bits | ||
908 | create_Dest_SN(int num) | ||
909 | { | ||
910 | const unsigned int n = (unsigned int)num; | ||
911 | return ((n & 0x3) << 2); | ||
912 | } | ||
913 | |||
914 | static __inline tile_bundle_bits | ||
915 | create_Dest_X0(int num) | ||
916 | { | ||
917 | const unsigned int n = (unsigned int)num; | ||
918 | return ((n & 0x3f) << 0); | ||
919 | } | ||
920 | |||
921 | static __inline tile_bundle_bits | ||
922 | create_Dest_X1(int num) | ||
923 | { | ||
924 | const unsigned int n = (unsigned int)num; | ||
925 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
926 | } | ||
927 | |||
928 | static __inline tile_bundle_bits | ||
929 | create_Dest_Y0(int num) | ||
930 | { | ||
931 | const unsigned int n = (unsigned int)num; | ||
932 | return ((n & 0x3f) << 0); | ||
933 | } | ||
934 | |||
935 | static __inline tile_bundle_bits | ||
936 | create_Dest_Y1(int num) | ||
937 | { | ||
938 | const unsigned int n = (unsigned int)num; | ||
939 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
940 | } | ||
941 | |||
942 | static __inline tile_bundle_bits | ||
943 | create_Imm16_X0(int num) | ||
944 | { | ||
945 | const unsigned int n = (unsigned int)num; | ||
946 | return ((n & 0xffff) << 12); | ||
947 | } | ||
948 | |||
949 | static __inline tile_bundle_bits | ||
950 | create_Imm16_X1(int num) | ||
951 | { | ||
952 | const unsigned int n = (unsigned int)num; | ||
953 | return (((tile_bundle_bits)(n & 0xffff)) << 43); | ||
954 | } | ||
955 | |||
956 | static __inline tile_bundle_bits | ||
957 | create_Imm8_SN(int num) | ||
958 | { | ||
959 | const unsigned int n = (unsigned int)num; | ||
960 | return ((n & 0xff) << 0); | ||
961 | } | ||
962 | |||
963 | static __inline tile_bundle_bits | ||
964 | create_Imm8_X0(int num) | ||
965 | { | ||
966 | const unsigned int n = (unsigned int)num; | ||
967 | return ((n & 0xff) << 12); | ||
968 | } | ||
969 | |||
970 | static __inline tile_bundle_bits | ||
971 | create_Imm8_X1(int num) | ||
972 | { | ||
973 | const unsigned int n = (unsigned int)num; | ||
974 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
975 | } | ||
976 | |||
977 | static __inline tile_bundle_bits | ||
978 | create_Imm8_Y0(int num) | ||
979 | { | ||
980 | const unsigned int n = (unsigned int)num; | ||
981 | return ((n & 0xff) << 12); | ||
982 | } | ||
983 | |||
984 | static __inline tile_bundle_bits | ||
985 | create_Imm8_Y1(int num) | ||
986 | { | ||
987 | const unsigned int n = (unsigned int)num; | ||
988 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
989 | } | ||
990 | |||
991 | static __inline tile_bundle_bits | ||
992 | create_ImmOpcodeExtension_X0(int num) | ||
993 | { | ||
994 | const unsigned int n = (unsigned int)num; | ||
995 | return ((n & 0x7f) << 20); | ||
996 | } | ||
997 | |||
998 | static __inline tile_bundle_bits | ||
999 | create_ImmOpcodeExtension_X1(int num) | ||
1000 | { | ||
1001 | const unsigned int n = (unsigned int)num; | ||
1002 | return (((tile_bundle_bits)(n & 0x7f)) << 51); | ||
1003 | } | ||
1004 | |||
/*
 * Machine-generated field encoders: each create_FOO() takes a raw field
 * value, masks it to the field's width, and shifts it into the bit
 * positions that field FOO occupies within an instruction bundle.
 * Fields wider than one contiguous slot are scattered across several
 * bit ranges, hence the multi-term ORs below.
 */

/* 2-bit SN immediate-RR opcode extension at bits [9:8]. */
static __inline tile_bundle_bits
create_ImmRROpcodeExtension_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3) << 8);
}

/* 29-bit long jump offset for pipe X1, scattered over five bit ranges. */
static __inline tile_bundle_bits
create_JOffLong_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
         (((tile_bundle_bits)(n & 0x00018000)) << 20) |
         (((tile_bundle_bits)(n & 0x001e0000)) << 14) |
         (((tile_bundle_bits)(n & 0x07e00000)) << 16) |
         (((tile_bundle_bits)(n & 0x18000000)) << 31);
}

/* 28-bit jump offset for pipe X1: same layout as JOffLong_X1 minus its
 * top bit (final mask 0x08000000 rather than 0x18000000). */
static __inline tile_bundle_bits
create_JOff_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x00007fff)) << 43) |
         (((tile_bundle_bits)(n & 0x00018000)) << 20) |
         (((tile_bundle_bits)(n & 0x001e0000)) << 14) |
         (((tile_bundle_bits)(n & 0x07e00000)) << 16) |
         (((tile_bundle_bits)(n & 0x08000000)) << 31);
}

/* 15-bit MF immediate: bits [13:0] placed at bit 37, bit 14 at bit 58. */
static __inline tile_bundle_bits
create_MF_Imm15_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x00003fff)) << 37) |
         (((tile_bundle_bits)(n & 0x00004000)) << 44);
}

/* 5-bit MM-instruction end-bit field for pipe X0, at bit 18. */
static __inline tile_bundle_bits
create_MMEnd_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 18);
}

/* 5-bit MM-instruction end-bit field for pipe X1, at bit 49. */
static __inline tile_bundle_bits
create_MMEnd_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 49);
}

/* 5-bit MM-instruction start-bit field for pipe X0, at bit 23. */
static __inline tile_bundle_bits
create_MMStart_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 23);
}

/* 5-bit MM-instruction start-bit field for pipe X1, at bit 54. */
static __inline tile_bundle_bits
create_MMStart_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 54);
}

/* 15-bit MT immediate, split three ways: bits [5:0] at 31,
 * bits [13:6] at 43, bit 14 at 58. */
static __inline tile_bundle_bits
create_MT_Imm15_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x0000003f)) << 31) |
         (((tile_bundle_bits)(n & 0x00003fc0)) << 37) |
         (((tile_bundle_bits)(n & 0x00004000)) << 44);
}

/* Mode bit (bit 63): per TILE_BUNDLE_Y_ENCODING_MASK below, this bit
 * selects the X vs. Y bundle encoding. */
static __inline tile_bundle_bits
create_Mode(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1)) << 63);
}
1085 | |||
/*
 * Opcode and opcode-extension field encoders for the SN (static
 * network), X, and Y pipelines.  Each masks the value to the field
 * width and shifts it to the field's bundle position.
 */

/* 4-bit SN no-register opcode extension at bits [3:0]. */
static __inline tile_bundle_bits
create_NoRegOpcodeExtension_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0xf) << 0);
}

/* 6-bit SN opcode at bit 10. */
static __inline tile_bundle_bits
create_Opcode_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 10);
}

/* 3-bit X0 opcode at bit 28. */
static __inline tile_bundle_bits
create_Opcode_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x7) << 28);
}

/* 4-bit X1 opcode at bit 59. */
static __inline tile_bundle_bits
create_Opcode_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0xf)) << 59);
}

/* 4-bit Y0 opcode at bit 27. */
static __inline tile_bundle_bits
create_Opcode_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0xf) << 27);
}

/* 4-bit Y1 opcode at bit 59 (same slot as Opcode_X1). */
static __inline tile_bundle_bits
create_Opcode_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0xf)) << 59);
}

/* 3-bit Y2 opcode at bit 56. */
static __inline tile_bundle_bits
create_Opcode_Y2(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x7)) << 56);
}

/* 4-bit SN register-register opcode extension at bits [7:4]. */
static __inline tile_bundle_bits
create_RROpcodeExtension_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0xf) << 4);
}

/* 9-bit X0 RRR opcode extension at bit 18. */
static __inline tile_bundle_bits
create_RRROpcodeExtension_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1ff) << 18);
}

/* 9-bit X1 RRR opcode extension at bit 49. */
static __inline tile_bundle_bits
create_RRROpcodeExtension_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1ff)) << 49);
}

/* 2-bit Y0 RRR opcode extension at bit 18. */
static __inline tile_bundle_bits
create_RRROpcodeExtension_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3) << 18);
}

/* 2-bit Y1 RRR opcode extension at bit 49. */
static __inline tile_bundle_bits
create_RRROpcodeExtension_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3)) << 49);
}

/* 10-bit SN route opcode extension at bits [9:0]. */
static __inline tile_bundle_bits
create_RouteOpcodeExtension_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3ff) << 0);
}
1176 | |||
/*
 * S-bit, shift-amount (ShAmt) and source-register-A (SrcA) field
 * encoders for the X and Y pipelines.
 */

/* Single S bit for pipe X0, at bit 27. */
static __inline tile_bundle_bits
create_S_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1) << 27);
}

/* Single S bit for pipe X1, at bit 58. */
static __inline tile_bundle_bits
create_S_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1)) << 58);
}

/* 5-bit shift amount for pipe X0, at bit 12. */
static __inline tile_bundle_bits
create_ShAmt_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 12);
}

/* 5-bit shift amount for pipe X1, at bit 43. */
static __inline tile_bundle_bits
create_ShAmt_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 43);
}

/* 5-bit shift amount for pipe Y0, at bit 12 (same slot as X0). */
static __inline tile_bundle_bits
create_ShAmt_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 12);
}

/* 5-bit shift amount for pipe Y1, at bit 43 (same slot as X1). */
static __inline tile_bundle_bits
create_ShAmt_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 43);
}

/* 6-bit source register A for pipe X0, at bit 6. */
static __inline tile_bundle_bits
create_SrcA_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 6);
}

/* 6-bit source register A for pipe X1, at bit 37. */
static __inline tile_bundle_bits
create_SrcA_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3f)) << 37);
}

/* 6-bit source register A for pipe Y0, at bit 6. */
static __inline tile_bundle_bits
create_SrcA_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 6);
}

/* 6-bit source register A for pipe Y1, at bit 37. */
static __inline tile_bundle_bits
create_SrcA_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3f)) << 37);
}

/* Source register A for pipe Y2 is split: bit 0 at bundle bit 26,
 * bits [5:1] at bundle bit 51 (mask 0x3e shifted by 50). */
static __inline tile_bundle_bits
create_SrcA_Y2(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x00000001) << 26) |
         (((tile_bundle_bits)(n & 0x0000003e)) << 50);
}
1254 | |||
/*
 * Source-register-B, SN source, and unary/unary-shift opcode-extension
 * field encoders.
 */

/* 6-bit Y2 combined source-B/destination register, at bit 20. */
static __inline tile_bundle_bits
create_SrcBDest_Y2(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 20);
}

/* 6-bit source register B for pipe X0, at bit 12. */
static __inline tile_bundle_bits
create_SrcB_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 12);
}

/* 6-bit source register B for pipe X1, at bit 43. */
static __inline tile_bundle_bits
create_SrcB_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3f)) << 43);
}

/* 6-bit source register B for pipe Y0, at bit 12. */
static __inline tile_bundle_bits
create_SrcB_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3f) << 12);
}

/* 6-bit source register B for pipe Y1, at bit 43. */
static __inline tile_bundle_bits
create_SrcB_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3f)) << 43);
}

/* 2-bit SN source field at bits [1:0]. */
static __inline tile_bundle_bits
create_Src_SN(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3) << 0);
}

/* 5-bit unary opcode extension for pipe X0, at bit 12. */
static __inline tile_bundle_bits
create_UnOpcodeExtension_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 12);
}

/* 5-bit unary opcode extension for pipe X1, at bit 43. */
static __inline tile_bundle_bits
create_UnOpcodeExtension_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 43);
}

/* 5-bit unary opcode extension for pipe Y0, at bit 12. */
static __inline tile_bundle_bits
create_UnOpcodeExtension_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x1f) << 12);
}

/* 5-bit unary opcode extension for pipe Y1, at bit 43. */
static __inline tile_bundle_bits
create_UnOpcodeExtension_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x1f)) << 43);
}

/* 10-bit unary-shift opcode extension for pipe X0, at bit 17. */
static __inline tile_bundle_bits
create_UnShOpcodeExtension_X0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x3ff) << 17);
}

/* 10-bit unary-shift opcode extension for pipe X1, at bit 48. */
static __inline tile_bundle_bits
create_UnShOpcodeExtension_X1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x3ff)) << 48);
}

/* 3-bit unary-shift opcode extension for pipe Y0, at bit 17. */
static __inline tile_bundle_bits
create_UnShOpcodeExtension_Y0(int num)
{
  const unsigned int n = (unsigned int)num;
  return ((n & 0x7) << 17);
}

/* 3-bit unary-shift opcode extension for pipe Y1, at bit 48. */
static __inline tile_bundle_bits
create_UnShOpcodeExtension_Y1(int num)
{
  const unsigned int n = (unsigned int)num;
  return (((tile_bundle_bits)(n & 0x7)) << 48);
}
1352 | |||
1353 | |||
1354 | |||
/* The five issue pipelines of a tile core.  The X0/X1 pair comes first
 * so that tile_is_x_pipeline() can test with a simple comparison. */
typedef enum
{
  TILE_PIPELINE_X0,
  TILE_PIPELINE_X1,
  TILE_PIPELINE_Y0,
  TILE_PIPELINE_Y1,
  TILE_PIPELINE_Y2,
} tile_pipeline;

/* Nonzero if 'p' is one of the X pipelines (relies on enum ordering). */
#define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1)

/* Kinds of operand an instruction field can hold. */
typedef enum
{
  TILE_OP_TYPE_REGISTER,
  TILE_OP_TYPE_IMMEDIATE,
  TILE_OP_TYPE_ADDRESS,
  TILE_OP_TYPE_SPR
} tile_operand_type;

/* This is the bit that determines if a bundle is in the Y encoding. */
#define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63)
1376 | |||
/* Architecture geometry constants for bundles and registers. */
enum
{
  /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */
  TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3,

  /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */
  TILE_NUM_PIPELINE_ENCODINGS = 5,

  /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */
  TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3,

  /* Instructions take this many bytes. */
  TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES,

  /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */
  TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3,

  /* Bundles should be aligned modulo this number of bytes. */
  TILE_BUNDLE_ALIGNMENT_IN_BYTES =
    (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES),

  /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */
  TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1,

  /* Static network instructions take this many bytes. */
  TILE_SN_INSTRUCTION_SIZE_IN_BYTES =
    (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES),

  /* Number of registers (some are magic, such as network I/O). */
  TILE_NUM_REGISTERS = 64,

  /* Number of static network registers. */
  TILE_NUM_SN_REGISTERS = 4
};
1411 | |||
1412 | |||
/* Static description of one instruction operand: its kind, value range,
 * and the insert/extract callbacks that move it into/out of a bundle. */
struct tile_operand
{
  /* Is this operand a register, immediate or address? */
  tile_operand_type type;

  /* The default relocation type for this operand. */
  signed int default_reloc : 16;

  /* How many bits is this value? (used for range checking) */
  unsigned int num_bits : 5;

  /* Is the value signed? (used for range checking) */
  unsigned int is_signed : 1;

  /* Is this operand a source register? */
  unsigned int is_src_reg : 1;

  /* Is this operand written? (i.e. is it a destination register) */
  unsigned int is_dest_reg : 1;

  /* Is this operand PC-relative? */
  unsigned int is_pc_relative : 1;

  /* By how many bits do we right shift the value before inserting? */
  unsigned int rightshift : 2;

  /* Return the bits for this operand to be ORed into an existing bundle. */
  tile_bundle_bits (*insert) (int op);

  /* Extract this operand and return it. */
  unsigned int (*extract) (tile_bundle_bits bundle);
};
1445 | |||
1446 | |||
/* Table of all operand descriptors; struct tile_opcode indexes into it. */
extern const struct tile_operand tile_operands[];

/* One finite-state machine per pipe for rapid instruction decoding. */
extern const unsigned short * const
tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS];


/* Static description of one opcode across all pipelines. */
struct tile_opcode
{
  /* The opcode mnemonic, e.g. "add" */
  const char *name;

  /* The enum value for this mnemonic. */
  tile_mnemonic mnemonic;

  /* A bit mask of which of the five pipes this instruction
     is compatible with:
     X0    0x01
     X1    0x02
     Y0    0x04
     Y1    0x08
     Y2    0x10 */
  unsigned char pipes;

  /* How many operands are there? */
  unsigned char num_operands;

  /* Which register does this write implicitly, or TREG_ZERO if none? */
  unsigned char implicitly_written_register;

  /* Can this be bundled with other instructions (almost always true). */
  unsigned char can_bundle;

  /* The description of the operands. Each of these is an
   * index into the tile_operands[] table. */
  unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS];

};

/* Table of all opcodes known to the disassembler. */
extern const struct tile_opcode tile_opcodes[];


/* Used for non-textual disassembly into structs. */
struct tile_decoded_instruction
{
  const struct tile_opcode *opcode;
  const struct tile_operand *operands[TILE_MAX_OPERANDS];
  int operand_values[TILE_MAX_OPERANDS];
};


/* Disassemble a bundle into a struct for machine processing.
 * NOTE(review): return-value semantics are not visible in this header —
 * presumably a count or status; confirm against the implementation. */
extern int parse_insn_tile(tile_bundle_bits bits,
                           unsigned int pc,
                           struct tile_decoded_instruction
                           decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]);



#endif /* opcode_tile_h */
diff --git a/arch/tile/include/asm/opcode_constants.h b/arch/tile/include/asm/opcode_constants.h new file mode 100644 index 000000000000..37a9f2958cb1 --- /dev/null +++ b/arch/tile/include/asm/opcode_constants.h | |||
@@ -0,0 +1,26 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_OPCODE_CONSTANTS_H
#define _ASM_TILE_OPCODE_CONSTANTS_H

#include <arch/chip.h>

/* Select the machine-generated opcode-constant table that matches this
 * chip's native word size. */
#if CHIP_WORD_SIZE() == 64
#include <asm/opcode_constants_64.h>
#else
#include <asm/opcode_constants_32.h>
#endif

#endif /* _ASM_TILE_OPCODE_CONSTANTS_H */
diff --git a/arch/tile/include/asm/opcode_constants_32.h b/arch/tile/include/asm/opcode_constants_32.h new file mode 100644 index 000000000000..227d033b180c --- /dev/null +++ b/arch/tile/include/asm/opcode_constants_32.h | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | |||
17 | |||
18 | #ifndef _TILE_OPCODE_CONSTANTS_H | ||
19 | #define _TILE_OPCODE_CONSTANTS_H | ||
20 | enum | ||
21 | { | ||
22 | ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, | ||
23 | ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, | ||
24 | ADDB_SPECIAL_0_OPCODE_X0 = 1, | ||
25 | ADDB_SPECIAL_0_OPCODE_X1 = 1, | ||
26 | ADDHS_SPECIAL_0_OPCODE_X0 = 99, | ||
27 | ADDHS_SPECIAL_0_OPCODE_X1 = 69, | ||
28 | ADDH_SPECIAL_0_OPCODE_X0 = 2, | ||
29 | ADDH_SPECIAL_0_OPCODE_X1 = 2, | ||
30 | ADDIB_IMM_0_OPCODE_X0 = 1, | ||
31 | ADDIB_IMM_0_OPCODE_X1 = 1, | ||
32 | ADDIH_IMM_0_OPCODE_X0 = 2, | ||
33 | ADDIH_IMM_0_OPCODE_X1 = 2, | ||
34 | ADDI_IMM_0_OPCODE_X0 = 3, | ||
35 | ADDI_IMM_0_OPCODE_X1 = 3, | ||
36 | ADDI_IMM_1_OPCODE_SN = 1, | ||
37 | ADDI_OPCODE_Y0 = 9, | ||
38 | ADDI_OPCODE_Y1 = 7, | ||
39 | ADDLIS_OPCODE_X0 = 1, | ||
40 | ADDLIS_OPCODE_X1 = 2, | ||
41 | ADDLI_OPCODE_X0 = 2, | ||
42 | ADDLI_OPCODE_X1 = 3, | ||
43 | ADDS_SPECIAL_0_OPCODE_X0 = 96, | ||
44 | ADDS_SPECIAL_0_OPCODE_X1 = 66, | ||
45 | ADD_SPECIAL_0_OPCODE_X0 = 3, | ||
46 | ADD_SPECIAL_0_OPCODE_X1 = 3, | ||
47 | ADD_SPECIAL_0_OPCODE_Y0 = 0, | ||
48 | ADD_SPECIAL_0_OPCODE_Y1 = 0, | ||
49 | ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, | ||
50 | ADIFFH_SPECIAL_0_OPCODE_X0 = 5, | ||
51 | ANDI_IMM_0_OPCODE_X0 = 1, | ||
52 | ANDI_IMM_0_OPCODE_X1 = 4, | ||
53 | ANDI_OPCODE_Y0 = 10, | ||
54 | ANDI_OPCODE_Y1 = 8, | ||
55 | AND_SPECIAL_0_OPCODE_X0 = 6, | ||
56 | AND_SPECIAL_0_OPCODE_X1 = 4, | ||
57 | AND_SPECIAL_2_OPCODE_Y0 = 0, | ||
58 | AND_SPECIAL_2_OPCODE_Y1 = 0, | ||
59 | AULI_OPCODE_X0 = 3, | ||
60 | AULI_OPCODE_X1 = 4, | ||
61 | AVGB_U_SPECIAL_0_OPCODE_X0 = 7, | ||
62 | AVGH_SPECIAL_0_OPCODE_X0 = 8, | ||
63 | BBNST_BRANCH_OPCODE_X1 = 15, | ||
64 | BBNS_BRANCH_OPCODE_X1 = 14, | ||
65 | BBNS_OPCODE_SN = 63, | ||
66 | BBST_BRANCH_OPCODE_X1 = 13, | ||
67 | BBS_BRANCH_OPCODE_X1 = 12, | ||
68 | BBS_OPCODE_SN = 62, | ||
69 | BGEZT_BRANCH_OPCODE_X1 = 7, | ||
70 | BGEZ_BRANCH_OPCODE_X1 = 6, | ||
71 | BGEZ_OPCODE_SN = 61, | ||
72 | BGZT_BRANCH_OPCODE_X1 = 5, | ||
73 | BGZ_BRANCH_OPCODE_X1 = 4, | ||
74 | BGZ_OPCODE_SN = 58, | ||
75 | BITX_UN_0_SHUN_0_OPCODE_X0 = 1, | ||
76 | BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, | ||
77 | BLEZT_BRANCH_OPCODE_X1 = 11, | ||
78 | BLEZ_BRANCH_OPCODE_X1 = 10, | ||
79 | BLEZ_OPCODE_SN = 59, | ||
80 | BLZT_BRANCH_OPCODE_X1 = 9, | ||
81 | BLZ_BRANCH_OPCODE_X1 = 8, | ||
82 | BLZ_OPCODE_SN = 60, | ||
83 | BNZT_BRANCH_OPCODE_X1 = 3, | ||
84 | BNZ_BRANCH_OPCODE_X1 = 2, | ||
85 | BNZ_OPCODE_SN = 57, | ||
86 | BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, | ||
87 | BRANCH_OPCODE_X1 = 5, | ||
88 | BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, | ||
89 | BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, | ||
90 | BZT_BRANCH_OPCODE_X1 = 1, | ||
91 | BZ_BRANCH_OPCODE_X1 = 0, | ||
92 | BZ_OPCODE_SN = 56, | ||
93 | CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, | ||
94 | CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, | ||
95 | CRC32_32_SPECIAL_0_OPCODE_X0 = 9, | ||
96 | CRC32_8_SPECIAL_0_OPCODE_X0 = 10, | ||
97 | CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, | ||
98 | CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, | ||
99 | DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, | ||
100 | DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, | ||
101 | DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, | ||
102 | FINV_UN_0_SHUN_0_OPCODE_X1 = 3, | ||
103 | FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, | ||
104 | FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, | ||
105 | FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, | ||
106 | FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, | ||
107 | FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
108 | FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, | ||
109 | HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
110 | ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, | ||
111 | ILL_UN_0_SHUN_0_OPCODE_X1 = 7, | ||
112 | ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, | ||
113 | IMM_0_OPCODE_SN = 0, | ||
114 | IMM_0_OPCODE_X0 = 4, | ||
115 | IMM_0_OPCODE_X1 = 6, | ||
116 | IMM_1_OPCODE_SN = 1, | ||
117 | IMM_OPCODE_0_X0 = 5, | ||
118 | INTHB_SPECIAL_0_OPCODE_X0 = 11, | ||
119 | INTHB_SPECIAL_0_OPCODE_X1 = 5, | ||
120 | INTHH_SPECIAL_0_OPCODE_X0 = 12, | ||
121 | INTHH_SPECIAL_0_OPCODE_X1 = 6, | ||
122 | INTLB_SPECIAL_0_OPCODE_X0 = 13, | ||
123 | INTLB_SPECIAL_0_OPCODE_X1 = 7, | ||
124 | INTLH_SPECIAL_0_OPCODE_X0 = 14, | ||
125 | INTLH_SPECIAL_0_OPCODE_X1 = 8, | ||
126 | INV_UN_0_SHUN_0_OPCODE_X1 = 8, | ||
127 | IRET_UN_0_SHUN_0_OPCODE_X1 = 9, | ||
128 | JALB_OPCODE_X1 = 13, | ||
129 | JALF_OPCODE_X1 = 12, | ||
130 | JALRP_SPECIAL_0_OPCODE_X1 = 9, | ||
131 | JALRR_IMM_1_OPCODE_SN = 3, | ||
132 | JALR_RR_IMM_0_OPCODE_SN = 5, | ||
133 | JALR_SPECIAL_0_OPCODE_X1 = 10, | ||
134 | JB_OPCODE_X1 = 11, | ||
135 | JF_OPCODE_X1 = 10, | ||
136 | JRP_SPECIAL_0_OPCODE_X1 = 11, | ||
137 | JRR_IMM_1_OPCODE_SN = 2, | ||
138 | JR_RR_IMM_0_OPCODE_SN = 4, | ||
139 | JR_SPECIAL_0_OPCODE_X1 = 12, | ||
140 | LBADD_IMM_0_OPCODE_X1 = 22, | ||
141 | LBADD_U_IMM_0_OPCODE_X1 = 23, | ||
142 | LB_OPCODE_Y2 = 0, | ||
143 | LB_UN_0_SHUN_0_OPCODE_X1 = 10, | ||
144 | LB_U_OPCODE_Y2 = 1, | ||
145 | LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, | ||
146 | LHADD_IMM_0_OPCODE_X1 = 24, | ||
147 | LHADD_U_IMM_0_OPCODE_X1 = 25, | ||
148 | LH_OPCODE_Y2 = 2, | ||
149 | LH_UN_0_SHUN_0_OPCODE_X1 = 12, | ||
150 | LH_U_OPCODE_Y2 = 3, | ||
151 | LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, | ||
152 | LNK_SPECIAL_0_OPCODE_X1 = 13, | ||
153 | LWADD_IMM_0_OPCODE_X1 = 26, | ||
154 | LWADD_NA_IMM_0_OPCODE_X1 = 27, | ||
155 | LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, | ||
156 | LW_OPCODE_Y2 = 4, | ||
157 | LW_UN_0_SHUN_0_OPCODE_X1 = 14, | ||
158 | MAXB_U_SPECIAL_0_OPCODE_X0 = 15, | ||
159 | MAXB_U_SPECIAL_0_OPCODE_X1 = 14, | ||
160 | MAXH_SPECIAL_0_OPCODE_X0 = 16, | ||
161 | MAXH_SPECIAL_0_OPCODE_X1 = 15, | ||
162 | MAXIB_U_IMM_0_OPCODE_X0 = 4, | ||
163 | MAXIB_U_IMM_0_OPCODE_X1 = 5, | ||
164 | MAXIH_IMM_0_OPCODE_X0 = 5, | ||
165 | MAXIH_IMM_0_OPCODE_X1 = 6, | ||
166 | MFSPR_IMM_0_OPCODE_X1 = 7, | ||
167 | MF_UN_0_SHUN_0_OPCODE_X1 = 15, | ||
168 | MINB_U_SPECIAL_0_OPCODE_X0 = 17, | ||
169 | MINB_U_SPECIAL_0_OPCODE_X1 = 16, | ||
170 | MINH_SPECIAL_0_OPCODE_X0 = 18, | ||
171 | MINH_SPECIAL_0_OPCODE_X1 = 17, | ||
172 | MINIB_U_IMM_0_OPCODE_X0 = 6, | ||
173 | MINIB_U_IMM_0_OPCODE_X1 = 8, | ||
174 | MINIH_IMM_0_OPCODE_X0 = 7, | ||
175 | MINIH_IMM_0_OPCODE_X1 = 9, | ||
176 | MM_OPCODE_X0 = 6, | ||
177 | MM_OPCODE_X1 = 7, | ||
178 | MNZB_SPECIAL_0_OPCODE_X0 = 19, | ||
179 | MNZB_SPECIAL_0_OPCODE_X1 = 18, | ||
180 | MNZH_SPECIAL_0_OPCODE_X0 = 20, | ||
181 | MNZH_SPECIAL_0_OPCODE_X1 = 19, | ||
182 | MNZ_SPECIAL_0_OPCODE_X0 = 21, | ||
183 | MNZ_SPECIAL_0_OPCODE_X1 = 20, | ||
184 | MNZ_SPECIAL_1_OPCODE_Y0 = 0, | ||
185 | MNZ_SPECIAL_1_OPCODE_Y1 = 1, | ||
186 | MOVEI_IMM_1_OPCODE_SN = 0, | ||
187 | MOVE_RR_IMM_0_OPCODE_SN = 8, | ||
188 | MTSPR_IMM_0_OPCODE_X1 = 10, | ||
189 | MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, | ||
190 | MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, | ||
191 | MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, | ||
192 | MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, | ||
193 | MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, | ||
194 | MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, | ||
195 | MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, | ||
196 | MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, | ||
197 | MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, | ||
198 | MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, | ||
199 | MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, | ||
200 | MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, | ||
201 | MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, | ||
202 | MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, | ||
203 | MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, | ||
204 | MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, | ||
205 | MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, | ||
206 | MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, | ||
207 | MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, | ||
208 | MULHL_US_SPECIAL_0_OPCODE_X0 = 36, | ||
209 | MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, | ||
210 | MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, | ||
211 | MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, | ||
212 | MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, | ||
213 | MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, | ||
214 | MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, | ||
215 | MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, | ||
216 | MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, | ||
217 | MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, | ||
218 | MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, | ||
219 | MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, | ||
220 | MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, | ||
221 | MVNZ_SPECIAL_0_OPCODE_X0 = 45, | ||
222 | MVNZ_SPECIAL_1_OPCODE_Y0 = 1, | ||
223 | MVZ_SPECIAL_0_OPCODE_X0 = 46, | ||
224 | MVZ_SPECIAL_1_OPCODE_Y0 = 2, | ||
225 | MZB_SPECIAL_0_OPCODE_X0 = 47, | ||
226 | MZB_SPECIAL_0_OPCODE_X1 = 21, | ||
227 | MZH_SPECIAL_0_OPCODE_X0 = 48, | ||
228 | MZH_SPECIAL_0_OPCODE_X1 = 22, | ||
229 | MZ_SPECIAL_0_OPCODE_X0 = 49, | ||
230 | MZ_SPECIAL_0_OPCODE_X1 = 23, | ||
231 | MZ_SPECIAL_1_OPCODE_Y0 = 3, | ||
232 | MZ_SPECIAL_1_OPCODE_Y1 = 2, | ||
233 | NAP_UN_0_SHUN_0_OPCODE_X1 = 16, | ||
234 | NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, | ||
235 | NOP_UN_0_SHUN_0_OPCODE_X0 = 6, | ||
236 | NOP_UN_0_SHUN_0_OPCODE_X1 = 17, | ||
237 | NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, | ||
238 | NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, | ||
239 | NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
240 | NOR_SPECIAL_0_OPCODE_X0 = 50, | ||
241 | NOR_SPECIAL_0_OPCODE_X1 = 24, | ||
242 | NOR_SPECIAL_2_OPCODE_Y0 = 1, | ||
243 | NOR_SPECIAL_2_OPCODE_Y1 = 1, | ||
244 | ORI_IMM_0_OPCODE_X0 = 8, | ||
245 | ORI_IMM_0_OPCODE_X1 = 11, | ||
246 | ORI_OPCODE_Y0 = 11, | ||
247 | ORI_OPCODE_Y1 = 9, | ||
248 | OR_SPECIAL_0_OPCODE_X0 = 51, | ||
249 | OR_SPECIAL_0_OPCODE_X1 = 25, | ||
250 | OR_SPECIAL_2_OPCODE_Y0 = 2, | ||
251 | OR_SPECIAL_2_OPCODE_Y1 = 2, | ||
252 | PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, | ||
253 | PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, | ||
254 | PACKHB_SPECIAL_0_OPCODE_X0 = 52, | ||
255 | PACKHB_SPECIAL_0_OPCODE_X1 = 26, | ||
256 | PACKHS_SPECIAL_0_OPCODE_X0 = 102, | ||
257 | PACKHS_SPECIAL_0_OPCODE_X1 = 72, | ||
258 | PACKLB_SPECIAL_0_OPCODE_X0 = 53, | ||
259 | PACKLB_SPECIAL_0_OPCODE_X1 = 27, | ||
260 | PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, | ||
261 | PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, | ||
262 | RLI_SHUN_0_OPCODE_X0 = 1, | ||
263 | RLI_SHUN_0_OPCODE_X1 = 1, | ||
264 | RLI_SHUN_0_OPCODE_Y0 = 1, | ||
265 | RLI_SHUN_0_OPCODE_Y1 = 1, | ||
266 | RL_SPECIAL_0_OPCODE_X0 = 54, | ||
267 | RL_SPECIAL_0_OPCODE_X1 = 28, | ||
268 | RL_SPECIAL_3_OPCODE_Y0 = 0, | ||
269 | RL_SPECIAL_3_OPCODE_Y1 = 0, | ||
270 | RR_IMM_0_OPCODE_SN = 0, | ||
271 | S1A_SPECIAL_0_OPCODE_X0 = 55, | ||
272 | S1A_SPECIAL_0_OPCODE_X1 = 29, | ||
273 | S1A_SPECIAL_0_OPCODE_Y0 = 1, | ||
274 | S1A_SPECIAL_0_OPCODE_Y1 = 1, | ||
275 | S2A_SPECIAL_0_OPCODE_X0 = 56, | ||
276 | S2A_SPECIAL_0_OPCODE_X1 = 30, | ||
277 | S2A_SPECIAL_0_OPCODE_Y0 = 2, | ||
278 | S2A_SPECIAL_0_OPCODE_Y1 = 2, | ||
279 | S3A_SPECIAL_0_OPCODE_X0 = 57, | ||
280 | S3A_SPECIAL_0_OPCODE_X1 = 31, | ||
281 | S3A_SPECIAL_5_OPCODE_Y0 = 1, | ||
282 | S3A_SPECIAL_5_OPCODE_Y1 = 1, | ||
283 | SADAB_U_SPECIAL_0_OPCODE_X0 = 58, | ||
284 | SADAH_SPECIAL_0_OPCODE_X0 = 59, | ||
285 | SADAH_U_SPECIAL_0_OPCODE_X0 = 60, | ||
286 | SADB_U_SPECIAL_0_OPCODE_X0 = 61, | ||
287 | SADH_SPECIAL_0_OPCODE_X0 = 62, | ||
288 | SADH_U_SPECIAL_0_OPCODE_X0 = 63, | ||
289 | SBADD_IMM_0_OPCODE_X1 = 28, | ||
290 | SB_OPCODE_Y2 = 5, | ||
291 | SB_SPECIAL_0_OPCODE_X1 = 32, | ||
292 | SEQB_SPECIAL_0_OPCODE_X0 = 64, | ||
293 | SEQB_SPECIAL_0_OPCODE_X1 = 33, | ||
294 | SEQH_SPECIAL_0_OPCODE_X0 = 65, | ||
295 | SEQH_SPECIAL_0_OPCODE_X1 = 34, | ||
296 | SEQIB_IMM_0_OPCODE_X0 = 9, | ||
297 | SEQIB_IMM_0_OPCODE_X1 = 12, | ||
298 | SEQIH_IMM_0_OPCODE_X0 = 10, | ||
299 | SEQIH_IMM_0_OPCODE_X1 = 13, | ||
300 | SEQI_IMM_0_OPCODE_X0 = 11, | ||
301 | SEQI_IMM_0_OPCODE_X1 = 14, | ||
302 | SEQI_OPCODE_Y0 = 12, | ||
303 | SEQI_OPCODE_Y1 = 10, | ||
304 | SEQ_SPECIAL_0_OPCODE_X0 = 66, | ||
305 | SEQ_SPECIAL_0_OPCODE_X1 = 35, | ||
306 | SEQ_SPECIAL_5_OPCODE_Y0 = 2, | ||
307 | SEQ_SPECIAL_5_OPCODE_Y1 = 2, | ||
308 | SHADD_IMM_0_OPCODE_X1 = 29, | ||
309 | SHL8II_IMM_0_OPCODE_SN = 3, | ||
310 | SHLB_SPECIAL_0_OPCODE_X0 = 67, | ||
311 | SHLB_SPECIAL_0_OPCODE_X1 = 36, | ||
312 | SHLH_SPECIAL_0_OPCODE_X0 = 68, | ||
313 | SHLH_SPECIAL_0_OPCODE_X1 = 37, | ||
314 | SHLIB_SHUN_0_OPCODE_X0 = 2, | ||
315 | SHLIB_SHUN_0_OPCODE_X1 = 2, | ||
316 | SHLIH_SHUN_0_OPCODE_X0 = 3, | ||
317 | SHLIH_SHUN_0_OPCODE_X1 = 3, | ||
318 | SHLI_SHUN_0_OPCODE_X0 = 4, | ||
319 | SHLI_SHUN_0_OPCODE_X1 = 4, | ||
320 | SHLI_SHUN_0_OPCODE_Y0 = 2, | ||
321 | SHLI_SHUN_0_OPCODE_Y1 = 2, | ||
322 | SHL_SPECIAL_0_OPCODE_X0 = 69, | ||
323 | SHL_SPECIAL_0_OPCODE_X1 = 38, | ||
324 | SHL_SPECIAL_3_OPCODE_Y0 = 1, | ||
325 | SHL_SPECIAL_3_OPCODE_Y1 = 1, | ||
326 | SHR1_RR_IMM_0_OPCODE_SN = 9, | ||
327 | SHRB_SPECIAL_0_OPCODE_X0 = 70, | ||
328 | SHRB_SPECIAL_0_OPCODE_X1 = 39, | ||
329 | SHRH_SPECIAL_0_OPCODE_X0 = 71, | ||
330 | SHRH_SPECIAL_0_OPCODE_X1 = 40, | ||
331 | SHRIB_SHUN_0_OPCODE_X0 = 5, | ||
332 | SHRIB_SHUN_0_OPCODE_X1 = 5, | ||
333 | SHRIH_SHUN_0_OPCODE_X0 = 6, | ||
334 | SHRIH_SHUN_0_OPCODE_X1 = 6, | ||
335 | SHRI_SHUN_0_OPCODE_X0 = 7, | ||
336 | SHRI_SHUN_0_OPCODE_X1 = 7, | ||
337 | SHRI_SHUN_0_OPCODE_Y0 = 3, | ||
338 | SHRI_SHUN_0_OPCODE_Y1 = 3, | ||
339 | SHR_SPECIAL_0_OPCODE_X0 = 72, | ||
340 | SHR_SPECIAL_0_OPCODE_X1 = 41, | ||
341 | SHR_SPECIAL_3_OPCODE_Y0 = 2, | ||
342 | SHR_SPECIAL_3_OPCODE_Y1 = 2, | ||
343 | SHUN_0_OPCODE_X0 = 7, | ||
344 | SHUN_0_OPCODE_X1 = 8, | ||
345 | SHUN_0_OPCODE_Y0 = 13, | ||
346 | SHUN_0_OPCODE_Y1 = 11, | ||
347 | SH_OPCODE_Y2 = 6, | ||
348 | SH_SPECIAL_0_OPCODE_X1 = 42, | ||
349 | SLTB_SPECIAL_0_OPCODE_X0 = 73, | ||
350 | SLTB_SPECIAL_0_OPCODE_X1 = 43, | ||
351 | SLTB_U_SPECIAL_0_OPCODE_X0 = 74, | ||
352 | SLTB_U_SPECIAL_0_OPCODE_X1 = 44, | ||
353 | SLTEB_SPECIAL_0_OPCODE_X0 = 75, | ||
354 | SLTEB_SPECIAL_0_OPCODE_X1 = 45, | ||
355 | SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, | ||
356 | SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, | ||
357 | SLTEH_SPECIAL_0_OPCODE_X0 = 77, | ||
358 | SLTEH_SPECIAL_0_OPCODE_X1 = 47, | ||
359 | SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, | ||
360 | SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, | ||
361 | SLTE_SPECIAL_0_OPCODE_X0 = 79, | ||
362 | SLTE_SPECIAL_0_OPCODE_X1 = 49, | ||
363 | SLTE_SPECIAL_4_OPCODE_Y0 = 0, | ||
364 | SLTE_SPECIAL_4_OPCODE_Y1 = 0, | ||
365 | SLTE_U_SPECIAL_0_OPCODE_X0 = 80, | ||
366 | SLTE_U_SPECIAL_0_OPCODE_X1 = 50, | ||
367 | SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, | ||
368 | SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, | ||
369 | SLTH_SPECIAL_0_OPCODE_X0 = 81, | ||
370 | SLTH_SPECIAL_0_OPCODE_X1 = 51, | ||
371 | SLTH_U_SPECIAL_0_OPCODE_X0 = 82, | ||
372 | SLTH_U_SPECIAL_0_OPCODE_X1 = 52, | ||
373 | SLTIB_IMM_0_OPCODE_X0 = 12, | ||
374 | SLTIB_IMM_0_OPCODE_X1 = 15, | ||
375 | SLTIB_U_IMM_0_OPCODE_X0 = 13, | ||
376 | SLTIB_U_IMM_0_OPCODE_X1 = 16, | ||
377 | SLTIH_IMM_0_OPCODE_X0 = 14, | ||
378 | SLTIH_IMM_0_OPCODE_X1 = 17, | ||
379 | SLTIH_U_IMM_0_OPCODE_X0 = 15, | ||
380 | SLTIH_U_IMM_0_OPCODE_X1 = 18, | ||
381 | SLTI_IMM_0_OPCODE_X0 = 16, | ||
382 | SLTI_IMM_0_OPCODE_X1 = 19, | ||
383 | SLTI_OPCODE_Y0 = 14, | ||
384 | SLTI_OPCODE_Y1 = 12, | ||
385 | SLTI_U_IMM_0_OPCODE_X0 = 17, | ||
386 | SLTI_U_IMM_0_OPCODE_X1 = 20, | ||
387 | SLTI_U_OPCODE_Y0 = 15, | ||
388 | SLTI_U_OPCODE_Y1 = 13, | ||
389 | SLT_SPECIAL_0_OPCODE_X0 = 83, | ||
390 | SLT_SPECIAL_0_OPCODE_X1 = 53, | ||
391 | SLT_SPECIAL_4_OPCODE_Y0 = 2, | ||
392 | SLT_SPECIAL_4_OPCODE_Y1 = 2, | ||
393 | SLT_U_SPECIAL_0_OPCODE_X0 = 84, | ||
394 | SLT_U_SPECIAL_0_OPCODE_X1 = 54, | ||
395 | SLT_U_SPECIAL_4_OPCODE_Y0 = 3, | ||
396 | SLT_U_SPECIAL_4_OPCODE_Y1 = 3, | ||
397 | SNEB_SPECIAL_0_OPCODE_X0 = 85, | ||
398 | SNEB_SPECIAL_0_OPCODE_X1 = 55, | ||
399 | SNEH_SPECIAL_0_OPCODE_X0 = 86, | ||
400 | SNEH_SPECIAL_0_OPCODE_X1 = 56, | ||
401 | SNE_SPECIAL_0_OPCODE_X0 = 87, | ||
402 | SNE_SPECIAL_0_OPCODE_X1 = 57, | ||
403 | SNE_SPECIAL_5_OPCODE_Y0 = 3, | ||
404 | SNE_SPECIAL_5_OPCODE_Y1 = 3, | ||
405 | SPECIAL_0_OPCODE_X0 = 0, | ||
406 | SPECIAL_0_OPCODE_X1 = 1, | ||
407 | SPECIAL_0_OPCODE_Y0 = 1, | ||
408 | SPECIAL_0_OPCODE_Y1 = 1, | ||
409 | SPECIAL_1_OPCODE_Y0 = 2, | ||
410 | SPECIAL_1_OPCODE_Y1 = 2, | ||
411 | SPECIAL_2_OPCODE_Y0 = 3, | ||
412 | SPECIAL_2_OPCODE_Y1 = 3, | ||
413 | SPECIAL_3_OPCODE_Y0 = 4, | ||
414 | SPECIAL_3_OPCODE_Y1 = 4, | ||
415 | SPECIAL_4_OPCODE_Y0 = 5, | ||
416 | SPECIAL_4_OPCODE_Y1 = 5, | ||
417 | SPECIAL_5_OPCODE_Y0 = 6, | ||
418 | SPECIAL_5_OPCODE_Y1 = 6, | ||
419 | SPECIAL_6_OPCODE_Y0 = 7, | ||
420 | SPECIAL_7_OPCODE_Y0 = 8, | ||
421 | SRAB_SPECIAL_0_OPCODE_X0 = 88, | ||
422 | SRAB_SPECIAL_0_OPCODE_X1 = 58, | ||
423 | SRAH_SPECIAL_0_OPCODE_X0 = 89, | ||
424 | SRAH_SPECIAL_0_OPCODE_X1 = 59, | ||
425 | SRAIB_SHUN_0_OPCODE_X0 = 8, | ||
426 | SRAIB_SHUN_0_OPCODE_X1 = 8, | ||
427 | SRAIH_SHUN_0_OPCODE_X0 = 9, | ||
428 | SRAIH_SHUN_0_OPCODE_X1 = 9, | ||
429 | SRAI_SHUN_0_OPCODE_X0 = 10, | ||
430 | SRAI_SHUN_0_OPCODE_X1 = 10, | ||
431 | SRAI_SHUN_0_OPCODE_Y0 = 4, | ||
432 | SRAI_SHUN_0_OPCODE_Y1 = 4, | ||
433 | SRA_SPECIAL_0_OPCODE_X0 = 90, | ||
434 | SRA_SPECIAL_0_OPCODE_X1 = 60, | ||
435 | SRA_SPECIAL_3_OPCODE_Y0 = 3, | ||
436 | SRA_SPECIAL_3_OPCODE_Y1 = 3, | ||
437 | SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, | ||
438 | SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, | ||
439 | SUBB_SPECIAL_0_OPCODE_X0 = 91, | ||
440 | SUBB_SPECIAL_0_OPCODE_X1 = 61, | ||
441 | SUBHS_SPECIAL_0_OPCODE_X0 = 101, | ||
442 | SUBHS_SPECIAL_0_OPCODE_X1 = 71, | ||
443 | SUBH_SPECIAL_0_OPCODE_X0 = 92, | ||
444 | SUBH_SPECIAL_0_OPCODE_X1 = 62, | ||
445 | SUBS_SPECIAL_0_OPCODE_X0 = 97, | ||
446 | SUBS_SPECIAL_0_OPCODE_X1 = 67, | ||
447 | SUB_SPECIAL_0_OPCODE_X0 = 93, | ||
448 | SUB_SPECIAL_0_OPCODE_X1 = 63, | ||
449 | SUB_SPECIAL_0_OPCODE_Y0 = 3, | ||
450 | SUB_SPECIAL_0_OPCODE_Y1 = 3, | ||
451 | SWADD_IMM_0_OPCODE_X1 = 30, | ||
452 | SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, | ||
453 | SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, | ||
454 | SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, | ||
455 | SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, | ||
456 | SW_OPCODE_Y2 = 7, | ||
457 | SW_SPECIAL_0_OPCODE_X1 = 64, | ||
458 | TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, | ||
459 | TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, | ||
460 | TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, | ||
461 | TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, | ||
462 | TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, | ||
463 | TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, | ||
464 | TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, | ||
465 | TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, | ||
466 | TNS_UN_0_SHUN_0_OPCODE_X1 = 22, | ||
467 | UN_0_SHUN_0_OPCODE_X0 = 11, | ||
468 | UN_0_SHUN_0_OPCODE_X1 = 11, | ||
469 | UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
470 | UN_0_SHUN_0_OPCODE_Y1 = 5, | ||
471 | WH64_UN_0_SHUN_0_OPCODE_X1 = 23, | ||
472 | XORI_IMM_0_OPCODE_X0 = 2, | ||
473 | XORI_IMM_0_OPCODE_X1 = 21, | ||
474 | XOR_SPECIAL_0_OPCODE_X0 = 94, | ||
475 | XOR_SPECIAL_0_OPCODE_X1 = 65, | ||
476 | XOR_SPECIAL_2_OPCODE_Y0 = 3, | ||
477 | XOR_SPECIAL_2_OPCODE_Y1 = 3 | ||
478 | }; | ||
479 | |||
480 | #endif /* !_TILE_OPCODE_CONSTANTS_H */ | ||
diff --git a/arch/tile/include/asm/opcode_constants_64.h b/arch/tile/include/asm/opcode_constants_64.h new file mode 100644 index 000000000000..227d033b180c --- /dev/null +++ b/arch/tile/include/asm/opcode_constants_64.h | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | |||
17 | |||
18 | #ifndef _TILE_OPCODE_CONSTANTS_H | ||
19 | #define _TILE_OPCODE_CONSTANTS_H | ||
20 | enum | ||
21 | { | ||
22 | ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, | ||
23 | ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, | ||
24 | ADDB_SPECIAL_0_OPCODE_X0 = 1, | ||
25 | ADDB_SPECIAL_0_OPCODE_X1 = 1, | ||
26 | ADDHS_SPECIAL_0_OPCODE_X0 = 99, | ||
27 | ADDHS_SPECIAL_0_OPCODE_X1 = 69, | ||
28 | ADDH_SPECIAL_0_OPCODE_X0 = 2, | ||
29 | ADDH_SPECIAL_0_OPCODE_X1 = 2, | ||
30 | ADDIB_IMM_0_OPCODE_X0 = 1, | ||
31 | ADDIB_IMM_0_OPCODE_X1 = 1, | ||
32 | ADDIH_IMM_0_OPCODE_X0 = 2, | ||
33 | ADDIH_IMM_0_OPCODE_X1 = 2, | ||
34 | ADDI_IMM_0_OPCODE_X0 = 3, | ||
35 | ADDI_IMM_0_OPCODE_X1 = 3, | ||
36 | ADDI_IMM_1_OPCODE_SN = 1, | ||
37 | ADDI_OPCODE_Y0 = 9, | ||
38 | ADDI_OPCODE_Y1 = 7, | ||
39 | ADDLIS_OPCODE_X0 = 1, | ||
40 | ADDLIS_OPCODE_X1 = 2, | ||
41 | ADDLI_OPCODE_X0 = 2, | ||
42 | ADDLI_OPCODE_X1 = 3, | ||
43 | ADDS_SPECIAL_0_OPCODE_X0 = 96, | ||
44 | ADDS_SPECIAL_0_OPCODE_X1 = 66, | ||
45 | ADD_SPECIAL_0_OPCODE_X0 = 3, | ||
46 | ADD_SPECIAL_0_OPCODE_X1 = 3, | ||
47 | ADD_SPECIAL_0_OPCODE_Y0 = 0, | ||
48 | ADD_SPECIAL_0_OPCODE_Y1 = 0, | ||
49 | ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, | ||
50 | ADIFFH_SPECIAL_0_OPCODE_X0 = 5, | ||
51 | ANDI_IMM_0_OPCODE_X0 = 1, | ||
52 | ANDI_IMM_0_OPCODE_X1 = 4, | ||
53 | ANDI_OPCODE_Y0 = 10, | ||
54 | ANDI_OPCODE_Y1 = 8, | ||
55 | AND_SPECIAL_0_OPCODE_X0 = 6, | ||
56 | AND_SPECIAL_0_OPCODE_X1 = 4, | ||
57 | AND_SPECIAL_2_OPCODE_Y0 = 0, | ||
58 | AND_SPECIAL_2_OPCODE_Y1 = 0, | ||
59 | AULI_OPCODE_X0 = 3, | ||
60 | AULI_OPCODE_X1 = 4, | ||
61 | AVGB_U_SPECIAL_0_OPCODE_X0 = 7, | ||
62 | AVGH_SPECIAL_0_OPCODE_X0 = 8, | ||
63 | BBNST_BRANCH_OPCODE_X1 = 15, | ||
64 | BBNS_BRANCH_OPCODE_X1 = 14, | ||
65 | BBNS_OPCODE_SN = 63, | ||
66 | BBST_BRANCH_OPCODE_X1 = 13, | ||
67 | BBS_BRANCH_OPCODE_X1 = 12, | ||
68 | BBS_OPCODE_SN = 62, | ||
69 | BGEZT_BRANCH_OPCODE_X1 = 7, | ||
70 | BGEZ_BRANCH_OPCODE_X1 = 6, | ||
71 | BGEZ_OPCODE_SN = 61, | ||
72 | BGZT_BRANCH_OPCODE_X1 = 5, | ||
73 | BGZ_BRANCH_OPCODE_X1 = 4, | ||
74 | BGZ_OPCODE_SN = 58, | ||
75 | BITX_UN_0_SHUN_0_OPCODE_X0 = 1, | ||
76 | BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, | ||
77 | BLEZT_BRANCH_OPCODE_X1 = 11, | ||
78 | BLEZ_BRANCH_OPCODE_X1 = 10, | ||
79 | BLEZ_OPCODE_SN = 59, | ||
80 | BLZT_BRANCH_OPCODE_X1 = 9, | ||
81 | BLZ_BRANCH_OPCODE_X1 = 8, | ||
82 | BLZ_OPCODE_SN = 60, | ||
83 | BNZT_BRANCH_OPCODE_X1 = 3, | ||
84 | BNZ_BRANCH_OPCODE_X1 = 2, | ||
85 | BNZ_OPCODE_SN = 57, | ||
86 | BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, | ||
87 | BRANCH_OPCODE_X1 = 5, | ||
88 | BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, | ||
89 | BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, | ||
90 | BZT_BRANCH_OPCODE_X1 = 1, | ||
91 | BZ_BRANCH_OPCODE_X1 = 0, | ||
92 | BZ_OPCODE_SN = 56, | ||
93 | CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, | ||
94 | CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, | ||
95 | CRC32_32_SPECIAL_0_OPCODE_X0 = 9, | ||
96 | CRC32_8_SPECIAL_0_OPCODE_X0 = 10, | ||
97 | CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, | ||
98 | CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, | ||
99 | DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, | ||
100 | DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, | ||
101 | DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, | ||
102 | FINV_UN_0_SHUN_0_OPCODE_X1 = 3, | ||
103 | FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, | ||
104 | FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, | ||
105 | FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, | ||
106 | FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, | ||
107 | FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
108 | FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, | ||
109 | HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
110 | ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, | ||
111 | ILL_UN_0_SHUN_0_OPCODE_X1 = 7, | ||
112 | ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, | ||
113 | IMM_0_OPCODE_SN = 0, | ||
114 | IMM_0_OPCODE_X0 = 4, | ||
115 | IMM_0_OPCODE_X1 = 6, | ||
116 | IMM_1_OPCODE_SN = 1, | ||
117 | IMM_OPCODE_0_X0 = 5, | ||
118 | INTHB_SPECIAL_0_OPCODE_X0 = 11, | ||
119 | INTHB_SPECIAL_0_OPCODE_X1 = 5, | ||
120 | INTHH_SPECIAL_0_OPCODE_X0 = 12, | ||
121 | INTHH_SPECIAL_0_OPCODE_X1 = 6, | ||
122 | INTLB_SPECIAL_0_OPCODE_X0 = 13, | ||
123 | INTLB_SPECIAL_0_OPCODE_X1 = 7, | ||
124 | INTLH_SPECIAL_0_OPCODE_X0 = 14, | ||
125 | INTLH_SPECIAL_0_OPCODE_X1 = 8, | ||
126 | INV_UN_0_SHUN_0_OPCODE_X1 = 8, | ||
127 | IRET_UN_0_SHUN_0_OPCODE_X1 = 9, | ||
128 | JALB_OPCODE_X1 = 13, | ||
129 | JALF_OPCODE_X1 = 12, | ||
130 | JALRP_SPECIAL_0_OPCODE_X1 = 9, | ||
131 | JALRR_IMM_1_OPCODE_SN = 3, | ||
132 | JALR_RR_IMM_0_OPCODE_SN = 5, | ||
133 | JALR_SPECIAL_0_OPCODE_X1 = 10, | ||
134 | JB_OPCODE_X1 = 11, | ||
135 | JF_OPCODE_X1 = 10, | ||
136 | JRP_SPECIAL_0_OPCODE_X1 = 11, | ||
137 | JRR_IMM_1_OPCODE_SN = 2, | ||
138 | JR_RR_IMM_0_OPCODE_SN = 4, | ||
139 | JR_SPECIAL_0_OPCODE_X1 = 12, | ||
140 | LBADD_IMM_0_OPCODE_X1 = 22, | ||
141 | LBADD_U_IMM_0_OPCODE_X1 = 23, | ||
142 | LB_OPCODE_Y2 = 0, | ||
143 | LB_UN_0_SHUN_0_OPCODE_X1 = 10, | ||
144 | LB_U_OPCODE_Y2 = 1, | ||
145 | LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, | ||
146 | LHADD_IMM_0_OPCODE_X1 = 24, | ||
147 | LHADD_U_IMM_0_OPCODE_X1 = 25, | ||
148 | LH_OPCODE_Y2 = 2, | ||
149 | LH_UN_0_SHUN_0_OPCODE_X1 = 12, | ||
150 | LH_U_OPCODE_Y2 = 3, | ||
151 | LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, | ||
152 | LNK_SPECIAL_0_OPCODE_X1 = 13, | ||
153 | LWADD_IMM_0_OPCODE_X1 = 26, | ||
154 | LWADD_NA_IMM_0_OPCODE_X1 = 27, | ||
155 | LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, | ||
156 | LW_OPCODE_Y2 = 4, | ||
157 | LW_UN_0_SHUN_0_OPCODE_X1 = 14, | ||
158 | MAXB_U_SPECIAL_0_OPCODE_X0 = 15, | ||
159 | MAXB_U_SPECIAL_0_OPCODE_X1 = 14, | ||
160 | MAXH_SPECIAL_0_OPCODE_X0 = 16, | ||
161 | MAXH_SPECIAL_0_OPCODE_X1 = 15, | ||
162 | MAXIB_U_IMM_0_OPCODE_X0 = 4, | ||
163 | MAXIB_U_IMM_0_OPCODE_X1 = 5, | ||
164 | MAXIH_IMM_0_OPCODE_X0 = 5, | ||
165 | MAXIH_IMM_0_OPCODE_X1 = 6, | ||
166 | MFSPR_IMM_0_OPCODE_X1 = 7, | ||
167 | MF_UN_0_SHUN_0_OPCODE_X1 = 15, | ||
168 | MINB_U_SPECIAL_0_OPCODE_X0 = 17, | ||
169 | MINB_U_SPECIAL_0_OPCODE_X1 = 16, | ||
170 | MINH_SPECIAL_0_OPCODE_X0 = 18, | ||
171 | MINH_SPECIAL_0_OPCODE_X1 = 17, | ||
172 | MINIB_U_IMM_0_OPCODE_X0 = 6, | ||
173 | MINIB_U_IMM_0_OPCODE_X1 = 8, | ||
174 | MINIH_IMM_0_OPCODE_X0 = 7, | ||
175 | MINIH_IMM_0_OPCODE_X1 = 9, | ||
176 | MM_OPCODE_X0 = 6, | ||
177 | MM_OPCODE_X1 = 7, | ||
178 | MNZB_SPECIAL_0_OPCODE_X0 = 19, | ||
179 | MNZB_SPECIAL_0_OPCODE_X1 = 18, | ||
180 | MNZH_SPECIAL_0_OPCODE_X0 = 20, | ||
181 | MNZH_SPECIAL_0_OPCODE_X1 = 19, | ||
182 | MNZ_SPECIAL_0_OPCODE_X0 = 21, | ||
183 | MNZ_SPECIAL_0_OPCODE_X1 = 20, | ||
184 | MNZ_SPECIAL_1_OPCODE_Y0 = 0, | ||
185 | MNZ_SPECIAL_1_OPCODE_Y1 = 1, | ||
186 | MOVEI_IMM_1_OPCODE_SN = 0, | ||
187 | MOVE_RR_IMM_0_OPCODE_SN = 8, | ||
188 | MTSPR_IMM_0_OPCODE_X1 = 10, | ||
189 | MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, | ||
190 | MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, | ||
191 | MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, | ||
192 | MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, | ||
193 | MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, | ||
194 | MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, | ||
195 | MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, | ||
196 | MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, | ||
197 | MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, | ||
198 | MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, | ||
199 | MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, | ||
200 | MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, | ||
201 | MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, | ||
202 | MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, | ||
203 | MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, | ||
204 | MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, | ||
205 | MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, | ||
206 | MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, | ||
207 | MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, | ||
208 | MULHL_US_SPECIAL_0_OPCODE_X0 = 36, | ||
209 | MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, | ||
210 | MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, | ||
211 | MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, | ||
212 | MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, | ||
213 | MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, | ||
214 | MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, | ||
215 | MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, | ||
216 | MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, | ||
217 | MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, | ||
218 | MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, | ||
219 | MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, | ||
220 | MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, | ||
221 | MVNZ_SPECIAL_0_OPCODE_X0 = 45, | ||
222 | MVNZ_SPECIAL_1_OPCODE_Y0 = 1, | ||
223 | MVZ_SPECIAL_0_OPCODE_X0 = 46, | ||
224 | MVZ_SPECIAL_1_OPCODE_Y0 = 2, | ||
225 | MZB_SPECIAL_0_OPCODE_X0 = 47, | ||
226 | MZB_SPECIAL_0_OPCODE_X1 = 21, | ||
227 | MZH_SPECIAL_0_OPCODE_X0 = 48, | ||
228 | MZH_SPECIAL_0_OPCODE_X1 = 22, | ||
229 | MZ_SPECIAL_0_OPCODE_X0 = 49, | ||
230 | MZ_SPECIAL_0_OPCODE_X1 = 23, | ||
231 | MZ_SPECIAL_1_OPCODE_Y0 = 3, | ||
232 | MZ_SPECIAL_1_OPCODE_Y1 = 2, | ||
233 | NAP_UN_0_SHUN_0_OPCODE_X1 = 16, | ||
234 | NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, | ||
235 | NOP_UN_0_SHUN_0_OPCODE_X0 = 6, | ||
236 | NOP_UN_0_SHUN_0_OPCODE_X1 = 17, | ||
237 | NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, | ||
238 | NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, | ||
239 | NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
240 | NOR_SPECIAL_0_OPCODE_X0 = 50, | ||
241 | NOR_SPECIAL_0_OPCODE_X1 = 24, | ||
242 | NOR_SPECIAL_2_OPCODE_Y0 = 1, | ||
243 | NOR_SPECIAL_2_OPCODE_Y1 = 1, | ||
244 | ORI_IMM_0_OPCODE_X0 = 8, | ||
245 | ORI_IMM_0_OPCODE_X1 = 11, | ||
246 | ORI_OPCODE_Y0 = 11, | ||
247 | ORI_OPCODE_Y1 = 9, | ||
248 | OR_SPECIAL_0_OPCODE_X0 = 51, | ||
249 | OR_SPECIAL_0_OPCODE_X1 = 25, | ||
250 | OR_SPECIAL_2_OPCODE_Y0 = 2, | ||
251 | OR_SPECIAL_2_OPCODE_Y1 = 2, | ||
252 | PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, | ||
253 | PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, | ||
254 | PACKHB_SPECIAL_0_OPCODE_X0 = 52, | ||
255 | PACKHB_SPECIAL_0_OPCODE_X1 = 26, | ||
256 | PACKHS_SPECIAL_0_OPCODE_X0 = 102, | ||
257 | PACKHS_SPECIAL_0_OPCODE_X1 = 72, | ||
258 | PACKLB_SPECIAL_0_OPCODE_X0 = 53, | ||
259 | PACKLB_SPECIAL_0_OPCODE_X1 = 27, | ||
260 | PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, | ||
261 | PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, | ||
262 | RLI_SHUN_0_OPCODE_X0 = 1, | ||
263 | RLI_SHUN_0_OPCODE_X1 = 1, | ||
264 | RLI_SHUN_0_OPCODE_Y0 = 1, | ||
265 | RLI_SHUN_0_OPCODE_Y1 = 1, | ||
266 | RL_SPECIAL_0_OPCODE_X0 = 54, | ||
267 | RL_SPECIAL_0_OPCODE_X1 = 28, | ||
268 | RL_SPECIAL_3_OPCODE_Y0 = 0, | ||
269 | RL_SPECIAL_3_OPCODE_Y1 = 0, | ||
270 | RR_IMM_0_OPCODE_SN = 0, | ||
271 | S1A_SPECIAL_0_OPCODE_X0 = 55, | ||
272 | S1A_SPECIAL_0_OPCODE_X1 = 29, | ||
273 | S1A_SPECIAL_0_OPCODE_Y0 = 1, | ||
274 | S1A_SPECIAL_0_OPCODE_Y1 = 1, | ||
275 | S2A_SPECIAL_0_OPCODE_X0 = 56, | ||
276 | S2A_SPECIAL_0_OPCODE_X1 = 30, | ||
277 | S2A_SPECIAL_0_OPCODE_Y0 = 2, | ||
278 | S2A_SPECIAL_0_OPCODE_Y1 = 2, | ||
279 | S3A_SPECIAL_0_OPCODE_X0 = 57, | ||
280 | S3A_SPECIAL_0_OPCODE_X1 = 31, | ||
281 | S3A_SPECIAL_5_OPCODE_Y0 = 1, | ||
282 | S3A_SPECIAL_5_OPCODE_Y1 = 1, | ||
283 | SADAB_U_SPECIAL_0_OPCODE_X0 = 58, | ||
284 | SADAH_SPECIAL_0_OPCODE_X0 = 59, | ||
285 | SADAH_U_SPECIAL_0_OPCODE_X0 = 60, | ||
286 | SADB_U_SPECIAL_0_OPCODE_X0 = 61, | ||
287 | SADH_SPECIAL_0_OPCODE_X0 = 62, | ||
288 | SADH_U_SPECIAL_0_OPCODE_X0 = 63, | ||
289 | SBADD_IMM_0_OPCODE_X1 = 28, | ||
290 | SB_OPCODE_Y2 = 5, | ||
291 | SB_SPECIAL_0_OPCODE_X1 = 32, | ||
292 | SEQB_SPECIAL_0_OPCODE_X0 = 64, | ||
293 | SEQB_SPECIAL_0_OPCODE_X1 = 33, | ||
294 | SEQH_SPECIAL_0_OPCODE_X0 = 65, | ||
295 | SEQH_SPECIAL_0_OPCODE_X1 = 34, | ||
296 | SEQIB_IMM_0_OPCODE_X0 = 9, | ||
297 | SEQIB_IMM_0_OPCODE_X1 = 12, | ||
298 | SEQIH_IMM_0_OPCODE_X0 = 10, | ||
299 | SEQIH_IMM_0_OPCODE_X1 = 13, | ||
300 | SEQI_IMM_0_OPCODE_X0 = 11, | ||
301 | SEQI_IMM_0_OPCODE_X1 = 14, | ||
302 | SEQI_OPCODE_Y0 = 12, | ||
303 | SEQI_OPCODE_Y1 = 10, | ||
304 | SEQ_SPECIAL_0_OPCODE_X0 = 66, | ||
305 | SEQ_SPECIAL_0_OPCODE_X1 = 35, | ||
306 | SEQ_SPECIAL_5_OPCODE_Y0 = 2, | ||
307 | SEQ_SPECIAL_5_OPCODE_Y1 = 2, | ||
308 | SHADD_IMM_0_OPCODE_X1 = 29, | ||
309 | SHL8II_IMM_0_OPCODE_SN = 3, | ||
310 | SHLB_SPECIAL_0_OPCODE_X0 = 67, | ||
311 | SHLB_SPECIAL_0_OPCODE_X1 = 36, | ||
312 | SHLH_SPECIAL_0_OPCODE_X0 = 68, | ||
313 | SHLH_SPECIAL_0_OPCODE_X1 = 37, | ||
314 | SHLIB_SHUN_0_OPCODE_X0 = 2, | ||
315 | SHLIB_SHUN_0_OPCODE_X1 = 2, | ||
316 | SHLIH_SHUN_0_OPCODE_X0 = 3, | ||
317 | SHLIH_SHUN_0_OPCODE_X1 = 3, | ||
318 | SHLI_SHUN_0_OPCODE_X0 = 4, | ||
319 | SHLI_SHUN_0_OPCODE_X1 = 4, | ||
320 | SHLI_SHUN_0_OPCODE_Y0 = 2, | ||
321 | SHLI_SHUN_0_OPCODE_Y1 = 2, | ||
322 | SHL_SPECIAL_0_OPCODE_X0 = 69, | ||
323 | SHL_SPECIAL_0_OPCODE_X1 = 38, | ||
324 | SHL_SPECIAL_3_OPCODE_Y0 = 1, | ||
325 | SHL_SPECIAL_3_OPCODE_Y1 = 1, | ||
326 | SHR1_RR_IMM_0_OPCODE_SN = 9, | ||
327 | SHRB_SPECIAL_0_OPCODE_X0 = 70, | ||
328 | SHRB_SPECIAL_0_OPCODE_X1 = 39, | ||
329 | SHRH_SPECIAL_0_OPCODE_X0 = 71, | ||
330 | SHRH_SPECIAL_0_OPCODE_X1 = 40, | ||
331 | SHRIB_SHUN_0_OPCODE_X0 = 5, | ||
332 | SHRIB_SHUN_0_OPCODE_X1 = 5, | ||
333 | SHRIH_SHUN_0_OPCODE_X0 = 6, | ||
334 | SHRIH_SHUN_0_OPCODE_X1 = 6, | ||
335 | SHRI_SHUN_0_OPCODE_X0 = 7, | ||
336 | SHRI_SHUN_0_OPCODE_X1 = 7, | ||
337 | SHRI_SHUN_0_OPCODE_Y0 = 3, | ||
338 | SHRI_SHUN_0_OPCODE_Y1 = 3, | ||
339 | SHR_SPECIAL_0_OPCODE_X0 = 72, | ||
340 | SHR_SPECIAL_0_OPCODE_X1 = 41, | ||
341 | SHR_SPECIAL_3_OPCODE_Y0 = 2, | ||
342 | SHR_SPECIAL_3_OPCODE_Y1 = 2, | ||
343 | SHUN_0_OPCODE_X0 = 7, | ||
344 | SHUN_0_OPCODE_X1 = 8, | ||
345 | SHUN_0_OPCODE_Y0 = 13, | ||
346 | SHUN_0_OPCODE_Y1 = 11, | ||
347 | SH_OPCODE_Y2 = 6, | ||
348 | SH_SPECIAL_0_OPCODE_X1 = 42, | ||
349 | SLTB_SPECIAL_0_OPCODE_X0 = 73, | ||
350 | SLTB_SPECIAL_0_OPCODE_X1 = 43, | ||
351 | SLTB_U_SPECIAL_0_OPCODE_X0 = 74, | ||
352 | SLTB_U_SPECIAL_0_OPCODE_X1 = 44, | ||
353 | SLTEB_SPECIAL_0_OPCODE_X0 = 75, | ||
354 | SLTEB_SPECIAL_0_OPCODE_X1 = 45, | ||
355 | SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, | ||
356 | SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, | ||
357 | SLTEH_SPECIAL_0_OPCODE_X0 = 77, | ||
358 | SLTEH_SPECIAL_0_OPCODE_X1 = 47, | ||
359 | SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, | ||
360 | SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, | ||
361 | SLTE_SPECIAL_0_OPCODE_X0 = 79, | ||
362 | SLTE_SPECIAL_0_OPCODE_X1 = 49, | ||
363 | SLTE_SPECIAL_4_OPCODE_Y0 = 0, | ||
364 | SLTE_SPECIAL_4_OPCODE_Y1 = 0, | ||
365 | SLTE_U_SPECIAL_0_OPCODE_X0 = 80, | ||
366 | SLTE_U_SPECIAL_0_OPCODE_X1 = 50, | ||
367 | SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, | ||
368 | SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, | ||
369 | SLTH_SPECIAL_0_OPCODE_X0 = 81, | ||
370 | SLTH_SPECIAL_0_OPCODE_X1 = 51, | ||
371 | SLTH_U_SPECIAL_0_OPCODE_X0 = 82, | ||
372 | SLTH_U_SPECIAL_0_OPCODE_X1 = 52, | ||
373 | SLTIB_IMM_0_OPCODE_X0 = 12, | ||
374 | SLTIB_IMM_0_OPCODE_X1 = 15, | ||
375 | SLTIB_U_IMM_0_OPCODE_X0 = 13, | ||
376 | SLTIB_U_IMM_0_OPCODE_X1 = 16, | ||
377 | SLTIH_IMM_0_OPCODE_X0 = 14, | ||
378 | SLTIH_IMM_0_OPCODE_X1 = 17, | ||
379 | SLTIH_U_IMM_0_OPCODE_X0 = 15, | ||
380 | SLTIH_U_IMM_0_OPCODE_X1 = 18, | ||
381 | SLTI_IMM_0_OPCODE_X0 = 16, | ||
382 | SLTI_IMM_0_OPCODE_X1 = 19, | ||
383 | SLTI_OPCODE_Y0 = 14, | ||
384 | SLTI_OPCODE_Y1 = 12, | ||
385 | SLTI_U_IMM_0_OPCODE_X0 = 17, | ||
386 | SLTI_U_IMM_0_OPCODE_X1 = 20, | ||
387 | SLTI_U_OPCODE_Y0 = 15, | ||
388 | SLTI_U_OPCODE_Y1 = 13, | ||
389 | SLT_SPECIAL_0_OPCODE_X0 = 83, | ||
390 | SLT_SPECIAL_0_OPCODE_X1 = 53, | ||
391 | SLT_SPECIAL_4_OPCODE_Y0 = 2, | ||
392 | SLT_SPECIAL_4_OPCODE_Y1 = 2, | ||
393 | SLT_U_SPECIAL_0_OPCODE_X0 = 84, | ||
394 | SLT_U_SPECIAL_0_OPCODE_X1 = 54, | ||
395 | SLT_U_SPECIAL_4_OPCODE_Y0 = 3, | ||
396 | SLT_U_SPECIAL_4_OPCODE_Y1 = 3, | ||
397 | SNEB_SPECIAL_0_OPCODE_X0 = 85, | ||
398 | SNEB_SPECIAL_0_OPCODE_X1 = 55, | ||
399 | SNEH_SPECIAL_0_OPCODE_X0 = 86, | ||
400 | SNEH_SPECIAL_0_OPCODE_X1 = 56, | ||
401 | SNE_SPECIAL_0_OPCODE_X0 = 87, | ||
402 | SNE_SPECIAL_0_OPCODE_X1 = 57, | ||
403 | SNE_SPECIAL_5_OPCODE_Y0 = 3, | ||
404 | SNE_SPECIAL_5_OPCODE_Y1 = 3, | ||
405 | SPECIAL_0_OPCODE_X0 = 0, | ||
406 | SPECIAL_0_OPCODE_X1 = 1, | ||
407 | SPECIAL_0_OPCODE_Y0 = 1, | ||
408 | SPECIAL_0_OPCODE_Y1 = 1, | ||
409 | SPECIAL_1_OPCODE_Y0 = 2, | ||
410 | SPECIAL_1_OPCODE_Y1 = 2, | ||
411 | SPECIAL_2_OPCODE_Y0 = 3, | ||
412 | SPECIAL_2_OPCODE_Y1 = 3, | ||
413 | SPECIAL_3_OPCODE_Y0 = 4, | ||
414 | SPECIAL_3_OPCODE_Y1 = 4, | ||
415 | SPECIAL_4_OPCODE_Y0 = 5, | ||
416 | SPECIAL_4_OPCODE_Y1 = 5, | ||
417 | SPECIAL_5_OPCODE_Y0 = 6, | ||
418 | SPECIAL_5_OPCODE_Y1 = 6, | ||
419 | SPECIAL_6_OPCODE_Y0 = 7, | ||
420 | SPECIAL_7_OPCODE_Y0 = 8, | ||
421 | SRAB_SPECIAL_0_OPCODE_X0 = 88, | ||
422 | SRAB_SPECIAL_0_OPCODE_X1 = 58, | ||
423 | SRAH_SPECIAL_0_OPCODE_X0 = 89, | ||
424 | SRAH_SPECIAL_0_OPCODE_X1 = 59, | ||
425 | SRAIB_SHUN_0_OPCODE_X0 = 8, | ||
426 | SRAIB_SHUN_0_OPCODE_X1 = 8, | ||
427 | SRAIH_SHUN_0_OPCODE_X0 = 9, | ||
428 | SRAIH_SHUN_0_OPCODE_X1 = 9, | ||
429 | SRAI_SHUN_0_OPCODE_X0 = 10, | ||
430 | SRAI_SHUN_0_OPCODE_X1 = 10, | ||
431 | SRAI_SHUN_0_OPCODE_Y0 = 4, | ||
432 | SRAI_SHUN_0_OPCODE_Y1 = 4, | ||
433 | SRA_SPECIAL_0_OPCODE_X0 = 90, | ||
434 | SRA_SPECIAL_0_OPCODE_X1 = 60, | ||
435 | SRA_SPECIAL_3_OPCODE_Y0 = 3, | ||
436 | SRA_SPECIAL_3_OPCODE_Y1 = 3, | ||
437 | SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, | ||
438 | SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, | ||
439 | SUBB_SPECIAL_0_OPCODE_X0 = 91, | ||
440 | SUBB_SPECIAL_0_OPCODE_X1 = 61, | ||
441 | SUBHS_SPECIAL_0_OPCODE_X0 = 101, | ||
442 | SUBHS_SPECIAL_0_OPCODE_X1 = 71, | ||
443 | SUBH_SPECIAL_0_OPCODE_X0 = 92, | ||
444 | SUBH_SPECIAL_0_OPCODE_X1 = 62, | ||
445 | SUBS_SPECIAL_0_OPCODE_X0 = 97, | ||
446 | SUBS_SPECIAL_0_OPCODE_X1 = 67, | ||
447 | SUB_SPECIAL_0_OPCODE_X0 = 93, | ||
448 | SUB_SPECIAL_0_OPCODE_X1 = 63, | ||
449 | SUB_SPECIAL_0_OPCODE_Y0 = 3, | ||
450 | SUB_SPECIAL_0_OPCODE_Y1 = 3, | ||
451 | SWADD_IMM_0_OPCODE_X1 = 30, | ||
452 | SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, | ||
453 | SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, | ||
454 | SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, | ||
455 | SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, | ||
456 | SW_OPCODE_Y2 = 7, | ||
457 | SW_SPECIAL_0_OPCODE_X1 = 64, | ||
458 | TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, | ||
459 | TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, | ||
460 | TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, | ||
461 | TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, | ||
462 | TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, | ||
463 | TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, | ||
464 | TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, | ||
465 | TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, | ||
466 | TNS_UN_0_SHUN_0_OPCODE_X1 = 22, | ||
467 | UN_0_SHUN_0_OPCODE_X0 = 11, | ||
468 | UN_0_SHUN_0_OPCODE_X1 = 11, | ||
469 | UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
470 | UN_0_SHUN_0_OPCODE_Y1 = 5, | ||
471 | WH64_UN_0_SHUN_0_OPCODE_X1 = 23, | ||
472 | XORI_IMM_0_OPCODE_X0 = 2, | ||
473 | XORI_IMM_0_OPCODE_X1 = 21, | ||
474 | XOR_SPECIAL_0_OPCODE_X0 = 94, | ||
475 | XOR_SPECIAL_0_OPCODE_X1 = 65, | ||
476 | XOR_SPECIAL_2_OPCODE_Y0 = 3, | ||
477 | XOR_SPECIAL_2_OPCODE_Y1 = 3 | ||
478 | }; | ||
479 | |||
480 | #endif /* !_TILE_OPCODE_CONSTANTS_H */ | ||
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h new file mode 100644 index 000000000000..f894a9016da6 --- /dev/null +++ b/arch/tile/include/asm/page.h | |||
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PAGE_H | ||
16 | #define _ASM_TILE_PAGE_H | ||
17 | |||
18 | #include <linux/const.h> | ||
19 | |||
20 | /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */ | ||
21 | #define PAGE_SHIFT 16 | ||
22 | #define HPAGE_SHIFT 24 | ||
23 | |||
24 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) | ||
25 | #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) | ||
26 | |||
27 | #define PAGE_MASK (~(PAGE_SIZE - 1)) | ||
28 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
29 | |||
30 | #ifdef __KERNEL__ | ||
31 | |||
32 | #include <hv/hypervisor.h> | ||
33 | #include <arch/chip.h> | ||
34 | |||
35 | /* | ||
36 | * The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx | ||
37 | * definitions in <hv/hypervisor.h>. We validate this at build time | ||
38 | * here, and again at runtime during early boot. We provide a | ||
39 | * separate definition since userspace doesn't have <hv/hypervisor.h>. | ||
40 | * | ||
41 | * Be careful to distinguish PAGE_SHIFT from HV_PTE_INDEX_PFN, since | ||
42 | * they are the same on i386 but not TILE. | ||
43 | */ | ||
44 | #if HV_LOG2_PAGE_SIZE_SMALL != PAGE_SHIFT | ||
45 | # error Small page size mismatch in Linux | ||
46 | #endif | ||
47 | #if HV_LOG2_PAGE_SIZE_LARGE != HPAGE_SHIFT | ||
48 | # error Huge page size mismatch in Linux | ||
49 | #endif | ||
50 | |||
51 | #ifndef __ASSEMBLY__ | ||
52 | |||
53 | #include <linux/types.h> | ||
54 | #include <linux/string.h> | ||
55 | |||
56 | struct page; | ||
57 | |||
58 | static inline void clear_page(void *page) | ||
59 | { | ||
60 | memset(page, 0, PAGE_SIZE); | ||
61 | } | ||
62 | |||
63 | static inline void copy_page(void *to, void *from) | ||
64 | { | ||
65 | memcpy(to, from, PAGE_SIZE); | ||
66 | } | ||
67 | |||
68 | static inline void clear_user_page(void *page, unsigned long vaddr, | ||
69 | struct page *pg) | ||
70 | { | ||
71 | clear_page(page); | ||
72 | } | ||
73 | |||
74 | static inline void copy_user_page(void *to, void *from, unsigned long vaddr, | ||
75 | struct page *topage) | ||
76 | { | ||
77 | copy_page(to, from); | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * Hypervisor page tables are made of the same basic structure. | ||
82 | */ | ||
83 | |||
84 | typedef __u64 pteval_t; | ||
85 | typedef __u64 pmdval_t; | ||
86 | typedef __u64 pudval_t; | ||
87 | typedef __u64 pgdval_t; | ||
88 | typedef __u64 pgprotval_t; | ||
89 | |||
90 | typedef HV_PTE pte_t; | ||
91 | typedef HV_PTE pgd_t; | ||
92 | typedef HV_PTE pgprot_t; | ||
93 | |||
94 | /* | ||
95 | * User L2 page tables are managed as one L2 page table per page, | ||
96 | * because we use the page allocator for them. This keeps the allocation | ||
97 | * simple and makes it potentially useful to implement HIGHPTE at some point. | ||
98 | * However, it's also inefficient, since L2 page tables are much smaller | ||
99 | * than pages (currently 2KB vs 64KB). So we should revisit this. | ||
100 | */ | ||
101 | typedef struct page *pgtable_t; | ||
102 | |||
103 | /* Must be a macro since it is used to create constants. */ | ||
104 | #define __pgprot(val) hv_pte(val) | ||
105 | |||
106 | static inline u64 pgprot_val(pgprot_t pgprot) | ||
107 | { | ||
108 | return hv_pte_val(pgprot); | ||
109 | } | ||
110 | |||
111 | static inline u64 pte_val(pte_t pte) | ||
112 | { | ||
113 | return hv_pte_val(pte); | ||
114 | } | ||
115 | |||
116 | static inline u64 pgd_val(pgd_t pgd) | ||
117 | { | ||
118 | return hv_pte_val(pgd); | ||
119 | } | ||
120 | |||
121 | #ifdef __tilegx__ | ||
122 | |||
123 | typedef HV_PTE pmd_t; | ||
124 | |||
125 | static inline u64 pmd_val(pmd_t pmd) | ||
126 | { | ||
127 | return hv_pte_val(pmd); | ||
128 | } | ||
129 | |||
130 | #endif | ||
131 | |||
132 | #endif /* !__ASSEMBLY__ */ | ||
133 | |||
134 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
135 | |||
136 | #define HUGE_MAX_HSTATE 2 | ||
137 | |||
138 | #ifdef CONFIG_HUGETLB_PAGE | ||
139 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
140 | #endif | ||
141 | |||
142 | /* Each memory controller has PAs distinct in their high bits. */ | ||
143 | #define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS()) | ||
144 | #define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS()) | ||
145 | #define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT) | ||
146 | #define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT)) | ||
147 | |||
148 | #ifdef __tilegx__ | ||
149 | |||
150 | /* | ||
151 | * We reserve the lower half of memory for user-space programs, and the | ||
152 | * upper half for system code. We re-map all of physical memory in the | ||
153 | * upper half, which takes a quarter of our VA space. Then we have | ||
154 | * the vmalloc regions. The supervisor code lives at 0xfffffff700000000, | ||
155 | * with the hypervisor above that. | ||
156 | * | ||
157 | * Loadable kernel modules are placed immediately after the static | ||
158 | * supervisor code, with each being allocated a 256MB region of | ||
159 | * address space, so we don't have to worry about the range of "jal" | ||
160 | * and other branch instructions. | ||
161 | * | ||
162 | * For now we keep life simple and just allocate one pmd (4GB) for vmalloc. | ||
163 | * Similarly, for now we don't play any struct page mapping games. | ||
164 | */ | ||
165 | |||
166 | #if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH() | ||
167 | # error Too much PA to map with the VA available! | ||
168 | #endif | ||
169 | #define HALF_VA_SPACE (_AC(1, UL) << (CHIP_VA_WIDTH() - 1)) | ||
170 | |||
171 | #define MEM_LOW_END (HALF_VA_SPACE - 1) /* low half */ | ||
172 | #define MEM_HIGH_START (-HALF_VA_SPACE) /* high half */ | ||
173 | #define PAGE_OFFSET MEM_HIGH_START | ||
174 | #define _VMALLOC_START _AC(0xfffffff500000000, UL) /* 4 GB */ | ||
175 | #define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL) /* 4 GB */ | ||
176 | #define MEM_SV_START _AC(0xfffffff700000000, UL) /* 256 MB */ | ||
177 | #define MEM_SV_INTRPT MEM_SV_START | ||
178 | #define MEM_MODULE_START _AC(0xfffffff710000000, UL) /* 256 MB */ | ||
179 | #define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024)) | ||
180 | #define MEM_HV_START _AC(0xfffffff800000000, UL) /* 32 GB */ | ||
181 | |||
182 | /* Highest DTLB address we will use */ | ||
183 | #define KERNEL_HIGH_VADDR MEM_SV_START | ||
184 | |||
185 | /* Since we don't currently provide any fixmaps, we use an impossible VA. */ | ||
186 | #define FIXADDR_TOP MEM_HV_START | ||
187 | |||
188 | #else /* !__tilegx__ */ | ||
189 | |||
190 | /* | ||
191 | * A PAGE_OFFSET of 0xC0000000 means that the kernel has | ||
192 | * a virtual address space of one gigabyte, which limits the | ||
193 | * amount of physical memory you can use to about 768MB. | ||
194 | * If you want more physical memory than this then see the CONFIG_HIGHMEM | ||
195 | * option in the kernel configuration. | ||
196 | * | ||
197 | * The top two 16MB chunks in the table below (VIRT and HV) are | ||
198 | * unavailable to Linux. Since the kernel interrupt vectors must live | ||
199 | * at 0xfd000000, we map all of the bottom of RAM at this address with | ||
200 | * a huge page table entry to minimize its ITLB footprint (as well as | ||
201 | * at PAGE_OFFSET). The last architected requirement is that user | ||
202 | * interrupt vectors live at 0xfc000000, so we make that range of | ||
203 | * memory available to user processes. The remaining regions are sized | ||
204 | * as shown; after the first four addresses, we show "typical" values, | ||
205 | * since the actual addresses depend on kernel #defines. | ||
206 | * | ||
207 | * MEM_VIRT_INTRPT 0xff000000 | ||
208 | * MEM_HV_INTRPT 0xfe000000 | ||
209 | * MEM_SV_INTRPT (kernel code) 0xfd000000 | ||
210 | * MEM_USER_INTRPT (user vector) 0xfc000000 | ||
211 | * FIX_KMAP_xxx 0xf8000000 (via NR_CPUS * KM_TYPE_NR) | ||
212 | * PKMAP_BASE 0xf7000000 (via LAST_PKMAP) | ||
213 | * HUGE_VMAP 0xf3000000 (via CONFIG_NR_HUGE_VMAPS) | ||
214 | * VMALLOC_START 0xf0000000 (via __VMALLOC_RESERVE) | ||
215 | * mapped LOWMEM 0xc0000000 | ||
216 | */ | ||
217 | |||
218 | #define MEM_USER_INTRPT _AC(0xfc000000, UL) | ||
219 | #define MEM_SV_INTRPT _AC(0xfd000000, UL) | ||
220 | #define MEM_HV_INTRPT _AC(0xfe000000, UL) | ||
221 | #define MEM_VIRT_INTRPT _AC(0xff000000, UL) | ||
222 | |||
223 | #define INTRPT_SIZE 0x4000 | ||
224 | |||
225 | /* Tolerate page size larger than the architecture interrupt region size. */ | ||
226 | #if PAGE_SIZE > INTRPT_SIZE | ||
227 | #undef INTRPT_SIZE | ||
228 | #define INTRPT_SIZE PAGE_SIZE | ||
229 | #endif | ||
230 | |||
231 | #define KERNEL_HIGH_VADDR MEM_USER_INTRPT | ||
232 | #define FIXADDR_TOP (KERNEL_HIGH_VADDR - PAGE_SIZE) | ||
233 | |||
234 | #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) | ||
235 | |||
236 | /* On 32-bit architectures we mix kernel modules in with other vmaps. */ | ||
237 | #define MEM_MODULE_START VMALLOC_START | ||
238 | #define MEM_MODULE_END VMALLOC_END | ||
239 | |||
240 | #endif /* __tilegx__ */ | ||
241 | |||
242 | #ifndef __ASSEMBLY__ | ||
243 | |||
244 | #ifdef CONFIG_HIGHMEM | ||
245 | |||
246 | /* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */ | ||
247 | extern unsigned long pbase_map[]; | ||
248 | extern void *vbase_map[]; | ||
249 | |||
static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr)
{
	/*
	 * Kernel VAs are mapped in HPAGE_SIZE chunks: pbase_map[] gives
	 * the base PFN of the huge page covering this address, to which
	 * we add the small-page index within that huge page.
	 */
	unsigned long kaddr = (unsigned long)_kaddr;
	return pbase_map[kaddr >> HPAGE_SHIFT] +
		((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT);
}
256 | |||
static inline void *pfn_to_kaddr(unsigned long pfn)
{
	/*
	 * Look up the virtual base for this PFN's "highbits" bucket and
	 * offset by the full pfn; the vbase_map[] entries appear to be
	 * pre-biased so that adding (pfn << PAGE_SHIFT) lands on the
	 * right VA -- TODO confirm against where vbase_map is filled in.
	 * (Arithmetic on void * relies on the GCC extension used
	 * kernel-wide.)
	 */
	return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT);
}
261 | |||
/* Translate a kernel VA to a physical address via the huge-page maps. */
static inline phys_addr_t virt_to_phys(const volatile void *kaddr)
{
	/* PA = (page frame << PAGE_SHIFT) + offset within the page. */
	unsigned long pfn = kaddr_to_pfn(kaddr);
	return ((phys_addr_t)pfn << PAGE_SHIFT) +
		((unsigned long)kaddr & (PAGE_SIZE-1));
}
268 | |||
/* Translate a physical address back to a kernel VA (inverse of above). */
static inline void *phys_to_virt(phys_addr_t paddr)
{
	/* Map the PFN back to a kernel VA, then re-add the page offset. */
	return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1));
}
273 | |||
274 | /* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */ | ||
static inline int virt_addr_valid(const volatile void *kaddr)
{
	/*
	 * A local extern declaration avoids pulling all of <linux/mm.h>
	 * into this low-level header.
	 */
	extern void *high_memory;  /* copied from <linux/mm.h> */
	/* Valid iff within the packed [PAGE_OFFSET, high_memory) range. */
	return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory);
}
280 | |||
281 | #else /* !CONFIG_HIGHMEM */ | ||
282 | |||
283 | static inline unsigned long kaddr_to_pfn(const volatile void *kaddr) | ||
284 | { | ||
285 | return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT; | ||
286 | } | ||
287 | |||
288 | static inline void *pfn_to_kaddr(unsigned long pfn) | ||
289 | { | ||
290 | return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET); | ||
291 | } | ||
292 | |||
293 | static inline phys_addr_t virt_to_phys(const volatile void *kaddr) | ||
294 | { | ||
295 | return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET); | ||
296 | } | ||
297 | |||
298 | static inline void *phys_to_virt(phys_addr_t paddr) | ||
299 | { | ||
300 | return (void *)((unsigned long)paddr + PAGE_OFFSET); | ||
301 | } | ||
302 | |||
303 | /* Check that the given address is within some mapped range of PAs. */ | ||
304 | #define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr)) | ||
305 | |||
306 | #endif /* !CONFIG_HIGHMEM */ | ||
307 | |||
308 | /* All callers are not consistent in how they call these functions. */ | ||
309 | #define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr)) | ||
310 | #define __va(paddr) phys_to_virt((phys_addr_t)(paddr)) | ||
311 | |||
312 | extern int devmem_is_allowed(unsigned long pagenr); | ||
313 | |||
314 | #ifdef CONFIG_FLATMEM | ||
static inline int pfn_valid(unsigned long pfn)
{
	/* Flat memory model: every frame below max_mapnr has a page. */
	return pfn < max_mapnr;
}
319 | #endif | ||
320 | |||
321 | /* Provide as macros since these require some other headers included. */ | ||
322 | #define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT) | ||
323 | #define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn(kaddr)) | ||
324 | #define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page)) | ||
325 | |||
326 | struct mm_struct; | ||
327 | extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); | ||
328 | |||
329 | #endif /* !__ASSEMBLY__ */ | ||
330 | |||
331 | #define VM_DATA_DEFAULT_FLAGS \ | ||
332 | (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
333 | |||
334 | #include <asm-generic/memory_model.h> | ||
335 | #include <asm-generic/getorder.h> | ||
336 | |||
337 | #endif /* __KERNEL__ */ | ||
338 | |||
339 | #endif /* _ASM_TILE_PAGE_H */ | ||
diff --git a/arch/tile/include/asm/param.h b/arch/tile/include/asm/param.h new file mode 100644 index 000000000000..965d45427975 --- /dev/null +++ b/arch/tile/include/asm/param.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/param.h> | |||
diff --git a/arch/tile/include/asm/pci-bridge.h b/arch/tile/include/asm/pci-bridge.h new file mode 100644 index 000000000000..e853b0e2793b --- /dev/null +++ b/arch/tile/include/asm/pci-bridge.h | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PCI_BRIDGE_H | ||
16 | #define _ASM_TILE_PCI_BRIDGE_H | ||
17 | |||
18 | #include <linux/ioport.h> | ||
19 | #include <linux/pci.h> | ||
20 | |||
21 | struct device_node; | ||
22 | struct pci_controller; | ||
23 | |||
24 | /* | ||
25 | * pci_io_base returns the memory address at which you can access | ||
26 | * the I/O space for PCI bus number `bus' (or NULL on error). | ||
27 | */ | ||
28 | extern void __iomem *pci_bus_io_base(unsigned int bus); | ||
29 | extern unsigned long pci_bus_io_base_phys(unsigned int bus); | ||
30 | extern unsigned long pci_bus_mem_base_phys(unsigned int bus); | ||
31 | |||
32 | /* Allocate a new PCI host bridge structure */ | ||
33 | extern struct pci_controller *pcibios_alloc_controller(void); | ||
34 | |||
35 | /* Helper function for setting up resources */ | ||
36 | extern void pci_init_resource(struct resource *res, unsigned long start, | ||
37 | unsigned long end, int flags, char *name); | ||
38 | |||
39 | /* Get the PCI host controller for a bus */ | ||
40 | extern struct pci_controller *pci_bus_to_hose(int bus); | ||
41 | |||
42 | /* | ||
43 | * Structure of a PCI controller (host bridge) | ||
44 | */ | ||
struct pci_controller {
	int index;		/* PCI domain number */
	struct pci_bus *root_bus;	/* root of this controller's bus tree */

	int first_busno;	/* lowest bus number behind this bridge */
	int last_busno;		/* highest bus number behind this bridge */

	int hv_cfg_fd[2];	/* config{0,1} fds for this PCIe controller */
	int hv_mem_fd;		/* fd to Hypervisor for MMIO operations */

	struct pci_ops *ops;	/* config-space access methods */

	int irq_base;		/* Base IRQ from the Hypervisor */
	int plx_gen1;		/* flag for PLX Gen 1 configuration */

	/* Address ranges that are routed to this controller/bridge. */
	struct resource mem_resources[3];
};
63 | |||
64 | static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus) | ||
65 | { | ||
66 | return bus->sysdata; | ||
67 | } | ||
68 | |||
69 | extern void setup_indirect_pci_nomap(struct pci_controller *hose, | ||
70 | void __iomem *cfg_addr, void __iomem *cfg_data); | ||
71 | extern void setup_indirect_pci(struct pci_controller *hose, | ||
72 | u32 cfg_addr, u32 cfg_data); | ||
73 | extern void setup_grackle(struct pci_controller *hose); | ||
74 | |||
75 | extern unsigned char common_swizzle(struct pci_dev *, unsigned char *); | ||
76 | |||
77 | /* | ||
78 | * The following code swizzles for exactly one bridge. The routine | ||
79 | * common_swizzle below handles multiple bridges. But there are a | ||
80 | * some boards that don't follow the PCI spec's suggestion so we | ||
81 | * break this piece out separately. | ||
82 | */ | ||
static inline unsigned char bridge_swizzle(unsigned char pin,
					   unsigned char idsel)
{
	/*
	 * Rotate the 1-based interrupt pin by the device's idsel,
	 * modulo the four INTx lines, yielding a 1-based pin again.
	 */
	unsigned char zero_based = pin - 1;
	return ((zero_based + idsel) % 4) + 1;
}
88 | |||
89 | /* | ||
90 | * The following macro is used to lookup irqs in a standard table | ||
91 | * format for those PPC systems that do not already have PCI | ||
92 | * interrupts properly routed. | ||
93 | */ | ||
94 | /* FIXME - double check this */ | ||
95 | #define PCI_IRQ_TABLE_LOOKUP ({ \ | ||
96 | long _ctl_ = -1; \ | ||
97 | if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \ | ||
98 | _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \ | ||
99 | _ctl_; \ | ||
100 | }) | ||
101 | |||
102 | /* | ||
103 | * Scan the buses below a given PCI host bridge and assign suitable | ||
104 | * resources to all devices found. | ||
105 | */ | ||
106 | extern int pciauto_bus_scan(struct pci_controller *, int); | ||
107 | |||
108 | #ifdef CONFIG_PCI | ||
109 | extern unsigned long pci_address_to_pio(phys_addr_t address); | ||
110 | #else | ||
static inline unsigned long pci_address_to_pio(phys_addr_t address)
{
	/* !CONFIG_PCI stub: I/O port space is unsupported, so always fail. */
	return (unsigned long)-1;
}
115 | #endif | ||
116 | |||
117 | #endif /* _ASM_TILE_PCI_BRIDGE_H */ | ||
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h new file mode 100644 index 000000000000..b0c15da2d5d5 --- /dev/null +++ b/arch/tile/include/asm/pci.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PCI_H | ||
16 | #define _ASM_TILE_PCI_H | ||
17 | |||
18 | #include <asm/pci-bridge.h> | ||
19 | |||
20 | /* | ||
21 | * The hypervisor maps the entirety of CPA-space as bus addresses, so | ||
22 | * bus addresses are physical addresses. The networking and block | ||
23 | * device layers use this boolean for bounce buffer decisions. | ||
24 | */ | ||
25 | #define PCI_DMA_BUS_IS_PHYS 1 | ||
26 | |||
27 | struct pci_controller *pci_bus_to_hose(int bus); | ||
28 | unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp); | ||
29 | int __init tile_pci_init(void); | ||
30 | void pci_iounmap(struct pci_dev *dev, void __iomem *addr); | ||
31 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); | ||
32 | void __devinit pcibios_fixup_bus(struct pci_bus *bus); | ||
33 | |||
34 | int __devinit _tile_cfg_read(struct pci_controller *hose, | ||
35 | int bus, | ||
36 | int slot, | ||
37 | int function, | ||
38 | int offset, | ||
39 | int size, | ||
40 | u32 *val); | ||
41 | int __devinit _tile_cfg_write(struct pci_controller *hose, | ||
42 | int bus, | ||
43 | int slot, | ||
44 | int function, | ||
45 | int offset, | ||
46 | int size, | ||
47 | u32 val); | ||
48 | |||
49 | /* | ||
50 | * These are used to to config reads and writes in the early stages of | ||
51 | * setup before the driver infrastructure has been set up enough to be | ||
52 | * able to do config reads and writes. | ||
53 | */ | ||
54 | #define early_cfg_read(where, size, value) \ | ||
55 | _tile_cfg_read(controller, \ | ||
56 | current_bus, \ | ||
57 | pci_slot, \ | ||
58 | pci_fn, \ | ||
59 | where, \ | ||
60 | size, \ | ||
61 | value) | ||
62 | |||
63 | #define early_cfg_write(where, size, value) \ | ||
64 | _tile_cfg_write(controller, \ | ||
65 | current_bus, \ | ||
66 | pci_slot, \ | ||
67 | pci_fn, \ | ||
68 | where, \ | ||
69 | size, \ | ||
70 | value) | ||
71 | |||
72 | |||
73 | |||
74 | #define PCICFG_BYTE 1 | ||
75 | #define PCICFG_WORD 2 | ||
76 | #define PCICFG_DWORD 4 | ||
77 | |||
78 | #define TILE_NUM_PCIE 2 | ||
79 | |||
80 | #define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index) | ||
81 | |||
82 | /* | ||
83 | * This decides whether to display the domain number in /proc. | ||
84 | */ | ||
static inline int pci_proc_domain(struct pci_bus *bus)
{
	/* Always include the domain number in /proc bus names. */
	return 1;
}
89 | |||
90 | /* | ||
91 | * I/O space is currently not supported. | ||
92 | */ | ||
93 | |||
94 | #define TILE_PCIE_LOWER_IO 0x0 | ||
95 | #define TILE_PCIE_UPPER_IO 0x10000 | ||
96 | #define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF | ||
97 | |||
98 | #define _PAGE_NO_CACHE 0 | ||
99 | #define _PAGE_GUARDED 0 | ||
100 | |||
101 | |||
102 | #define pcibios_assign_all_busses() pci_assign_all_buses | ||
103 | extern int pci_assign_all_buses; | ||
104 | |||
static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling needed on this arch. */
}
109 | |||
110 | #define PCIBIOS_MIN_MEM 0 | ||
111 | #define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO | ||
112 | |||
113 | /* | ||
114 | * This flag tells if the platform is TILEmpower that needs | ||
115 | * special configuration for the PLX switch chip. | ||
116 | */ | ||
117 | extern int blade_pci; | ||
118 | |||
119 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | ||
120 | #include <asm-generic/pci-dma-compat.h> | ||
121 | |||
122 | /* generic pci stuff */ | ||
123 | #include <asm-generic/pci.h> | ||
124 | |||
125 | /* Use any cpu for PCI. */ | ||
126 | #define cpumask_of_pcibus(bus) cpu_online_mask | ||
127 | |||
128 | #endif /* _ASM_TILE_PCI_H */ | ||
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h new file mode 100644 index 000000000000..63294f5a8efb --- /dev/null +++ b/arch/tile/include/asm/percpu.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PERCPU_H | ||
16 | #define _ASM_TILE_PERCPU_H | ||
17 | |||
18 | register unsigned long __my_cpu_offset __asm__("tp"); | ||
19 | #define __my_cpu_offset __my_cpu_offset | ||
20 | #define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) | ||
21 | |||
22 | #include <asm-generic/percpu.h> | ||
23 | |||
24 | #endif /* _ASM_TILE_PERCPU_H */ | ||
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h new file mode 100644 index 000000000000..cf52791a5501 --- /dev/null +++ b/arch/tile/include/asm/pgalloc.h | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PGALLOC_H | ||
16 | #define _ASM_TILE_PGALLOC_H | ||
17 | |||
18 | #include <linux/threads.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/mmzone.h> | ||
21 | #include <asm/fixmap.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | |||
24 | /* Bits for the size of the second-level page table. */ | ||
25 | #define L2_KERNEL_PGTABLE_SHIFT \ | ||
26 | (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE) | ||
27 | |||
28 | /* We currently allocate user L2 page tables by page (unlike kernel L2s). */ | ||
29 | #if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL | ||
30 | #define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL | ||
31 | #else | ||
32 | #define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT | ||
33 | #endif | ||
34 | |||
35 | /* How many pages do we need, as an "order", for a user L2 page table? */ | ||
36 | #define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL) | ||
37 | |||
38 | /* How big is a kernel L2 page table? */ | ||
39 | #define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT) | ||
40 | |||
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	/*
	 * Install a PMD entry via set_pte_order().  On 64-bit the pmd_t
	 * is written directly; on 32-bit the pmd type wraps a pud which
	 * wraps a pgd, so we unwrap down to the underlying pgd entry.
	 */
#ifdef CONFIG_64BIT
	set_pte_order(pmdp, pmd, L2_USER_PGTABLE_ORDER);
#else
	set_pte_order(&pmdp->pud.pgd, pmd.pud.pgd, L2_USER_PGTABLE_ORDER);
#endif
}
49 | |||
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *ptep)
{
	/* Point the PMD at a kernel L2 table, given its lowmem VA. */
	set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN,
			      __pgprot(_PAGE_PRESENT)));
}
56 | |||
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t page)
{
	/* Point the PMD at a user L2 table, given its struct page. */
	set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)),
			      __pgprot(_PAGE_PRESENT)));
}
63 | |||
64 | /* | ||
65 | * Allocate and free page tables. | ||
66 | */ | ||
67 | |||
68 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | ||
69 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | ||
70 | |||
71 | extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); | ||
72 | extern void pte_free(struct mm_struct *mm, struct page *pte); | ||
73 | |||
74 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
75 | |||
76 | static inline pte_t * | ||
77 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | ||
78 | { | ||
79 | return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address))); | ||
80 | } | ||
81 | |||
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	/*
	 * Kernel L2 tables come from pte_alloc_one_kernel() and are
	 * page-aligned; a misaligned pointer indicates corruption.
	 */
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	pte_free(mm, virt_to_page(pte));
}
87 | |||
88 | extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, | ||
89 | unsigned long address); | ||
90 | |||
91 | #define check_pgt_cache() do { } while (0) | ||
92 | |||
93 | /* | ||
94 | * Get the small-page pte_t lowmem entry for a given pfn. | ||
95 | * This may or may not be in use, depending on whether the initial | ||
96 | * huge-page entry for the page has already been shattered. | ||
97 | */ | ||
98 | pte_t *get_prealloc_pte(unsigned long pfn); | ||
99 | |||
100 | /* During init, we can shatter kernel huge pages if needed. */ | ||
101 | void shatter_pmd(pmd_t *pmd); | ||
102 | |||
103 | #ifdef __tilegx__ | ||
104 | /* We share a single page allocator for both L1 and L2 page tables. */ | ||
105 | #if HV_L1_SIZE != HV_L2_SIZE | ||
106 | # error Rework assumption that L1 and L2 page tables are same size. | ||
107 | #endif | ||
108 | #define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER | ||
109 | #define pud_populate(mm, pud, pmd) \ | ||
110 | pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd)) | ||
111 | #define pmd_alloc_one(mm, addr) \ | ||
112 | ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr)))) | ||
113 | #define pmd_free(mm, pmdp) \ | ||
114 | pte_free((mm), virt_to_page(pmdp)) | ||
115 | #define __pmd_free_tlb(tlb, pmdp, address) \ | ||
116 | __pte_free_tlb((tlb), virt_to_page(pmdp), (address)) | ||
117 | #endif | ||
118 | |||
119 | #endif /* _ASM_TILE_PGALLOC_H */ | ||
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h new file mode 100644 index 000000000000..b3367379d537 --- /dev/null +++ b/arch/tile/include/asm/pgtable.h | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This file contains the functions and defines necessary to modify and use | ||
15 | * the TILE page table tree. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_TILE_PGTABLE_H | ||
19 | #define _ASM_TILE_PGTABLE_H | ||
20 | |||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | #ifndef __ASSEMBLY__ | ||
24 | |||
25 | #include <linux/bitops.h> | ||
26 | #include <linux/threads.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/list.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <asm/processor.h> | ||
31 | #include <asm/fixmap.h> | ||
32 | #include <asm/system.h> | ||
33 | |||
34 | struct mm_struct; | ||
35 | struct vm_area_struct; | ||
36 | |||
37 | /* | ||
38 | * ZERO_PAGE is a global shared page that is always zero: used | ||
39 | * for zero-mapped memory areas etc.. | ||
40 | */ | ||
41 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | ||
42 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
43 | |||
44 | extern pgd_t swapper_pg_dir[]; | ||
45 | extern pgprot_t swapper_pgprot; | ||
46 | extern struct kmem_cache *pgd_cache; | ||
47 | extern spinlock_t pgd_lock; | ||
48 | extern struct list_head pgd_list; | ||
49 | |||
50 | /* | ||
51 | * The very last slots in the pgd_t are for addresses unusable by Linux | ||
52 | * (pgd_addr_invalid() returns true). So we use them for the list structure. | ||
53 | * The x86 code we are modelled on uses the page->private/index fields | ||
54 | * (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since | ||
55 | * our pgds are so much smaller than a page, it seems a waste to | ||
56 | * spend a whole page on each pgd. | ||
57 | */ | ||
58 | #define PGD_LIST_OFFSET \ | ||
59 | ((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head)) | ||
60 | #define pgd_to_list(pgd) \ | ||
61 | ((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET)) | ||
62 | #define list_to_pgd(list) \ | ||
63 | ((pgd_t *)((char *)(list) - PGD_LIST_OFFSET)) | ||
64 | |||
65 | extern void pgtable_cache_init(void); | ||
66 | extern void paging_init(void); | ||
67 | extern void set_page_homes(void); | ||
68 | |||
69 | #define FIRST_USER_ADDRESS 0 | ||
70 | |||
71 | #define _PAGE_PRESENT HV_PTE_PRESENT | ||
72 | #define _PAGE_HUGE_PAGE HV_PTE_PAGE | ||
73 | #define _PAGE_READABLE HV_PTE_READABLE | ||
74 | #define _PAGE_WRITABLE HV_PTE_WRITABLE | ||
75 | #define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE | ||
76 | #define _PAGE_ACCESSED HV_PTE_ACCESSED | ||
77 | #define _PAGE_DIRTY HV_PTE_DIRTY | ||
78 | #define _PAGE_GLOBAL HV_PTE_GLOBAL | ||
79 | #define _PAGE_USER HV_PTE_USER | ||
80 | |||
81 | /* | ||
82 | * All the "standard" bits. Cache-control bits are managed elsewhere. | ||
83 | * This is used to test for valid level-2 page table pointers by checking | ||
84 | * all the bits, and to mask away the cache control bits for mprotect. | ||
85 | */ | ||
86 | #define _PAGE_ALL (\ | ||
87 | _PAGE_PRESENT | \ | ||
88 | _PAGE_HUGE_PAGE | \ | ||
89 | _PAGE_READABLE | \ | ||
90 | _PAGE_WRITABLE | \ | ||
91 | _PAGE_EXECUTABLE | \ | ||
92 | _PAGE_ACCESSED | \ | ||
93 | _PAGE_DIRTY | \ | ||
94 | _PAGE_GLOBAL | \ | ||
95 | _PAGE_USER \ | ||
96 | ) | ||
97 | |||
98 | #define PAGE_NONE \ | ||
99 | __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) | ||
100 | #define PAGE_SHARED \ | ||
101 | __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \ | ||
102 | _PAGE_USER | _PAGE_ACCESSED) | ||
103 | |||
104 | #define PAGE_SHARED_EXEC \ | ||
105 | __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \ | ||
106 | _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED) | ||
107 | #define PAGE_COPY_NOEXEC \ | ||
108 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE) | ||
109 | #define PAGE_COPY_EXEC \ | ||
110 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \ | ||
111 | _PAGE_READABLE | _PAGE_EXECUTABLE) | ||
112 | #define PAGE_COPY \ | ||
113 | PAGE_COPY_NOEXEC | ||
114 | #define PAGE_READONLY \ | ||
115 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE) | ||
116 | #define PAGE_READONLY_EXEC \ | ||
117 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \ | ||
118 | _PAGE_READABLE | _PAGE_EXECUTABLE) | ||
119 | |||
120 | #define _PAGE_KERNEL_RO \ | ||
121 | (_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED) | ||
122 | #define _PAGE_KERNEL \ | ||
123 | (_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY) | ||
124 | #define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE) | ||
125 | |||
126 | #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) | ||
127 | #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO) | ||
128 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) | ||
129 | |||
130 | #define page_to_kpgprot(p) PAGE_KERNEL | ||
131 | |||
132 | /* | ||
133 | * We could tighten these up, but for now writable or executable | ||
134 | * implies readable. | ||
135 | */ | ||
136 | #define __P000 PAGE_NONE | ||
137 | #define __P001 PAGE_READONLY | ||
138 | #define __P010 PAGE_COPY /* this is write-only, which we won't support */ | ||
139 | #define __P011 PAGE_COPY | ||
140 | #define __P100 PAGE_READONLY_EXEC | ||
141 | #define __P101 PAGE_READONLY_EXEC | ||
142 | #define __P110 PAGE_COPY_EXEC | ||
143 | #define __P111 PAGE_COPY_EXEC | ||
144 | |||
145 | #define __S000 PAGE_NONE | ||
146 | #define __S001 PAGE_READONLY | ||
147 | #define __S010 PAGE_SHARED | ||
148 | #define __S011 PAGE_SHARED | ||
149 | #define __S100 PAGE_READONLY_EXEC | ||
150 | #define __S101 PAGE_READONLY_EXEC | ||
151 | #define __S110 PAGE_SHARED_EXEC | ||
152 | #define __S111 PAGE_SHARED_EXEC | ||
153 | |||
154 | /* | ||
155 | * All the normal _PAGE_ALL bits are ignored for PMDs, except PAGE_PRESENT | ||
156 | * and PAGE_HUGE_PAGE, which must be one and zero, respectively. | ||
157 | * We set the ignored bits to zero. | ||
158 | */ | ||
159 | #define _PAGE_TABLE _PAGE_PRESENT | ||
160 | |||
161 | /* Inherit the caching flags from the old protection bits. */ | ||
162 | #define pgprot_modify(oldprot, newprot) \ | ||
163 | (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val } | ||
164 | |||
165 | /* Just setting the PFN to zero suffices. */ | ||
166 | #define pte_pgprot(x) hv_pte_set_pfn((x), 0) | ||
167 | |||
168 | /* | ||
169 | * For PTEs and PDEs, we must clear the Present bit first when | ||
170 | * clearing a page table entry, so clear the bottom half first and | ||
171 | * enforce ordering with a barrier. | ||
172 | */ | ||
static inline void __pte_clear(pte_t *ptep)
{
#ifdef __tilegx__
	/* A single 64-bit store clears the whole PTE at once. */
	ptep->val = 0;
#else
	/*
	 * Two 32-bit stores: clear the bottom half (which holds the
	 * Present bit, per the comment above) first, with a compiler
	 * barrier so the stores are not reordered and no observer can
	 * see a present-but-half-cleared entry.
	 */
	u32 *tmp = (u32 *)ptep;
	tmp[0] = 0;
	barrier();
	tmp[1] = 0;
#endif
}
184 | #define pte_clear(mm, addr, ptep) __pte_clear(ptep) | ||
185 | |||
186 | /* | ||
187 | * The following only work if pte_present() is true. | ||
188 | * Undefined behaviour if not.. | ||
189 | */ | ||
190 | #define pte_present hv_pte_get_present | ||
191 | #define pte_user hv_pte_get_user | ||
192 | #define pte_read hv_pte_get_readable | ||
193 | #define pte_dirty hv_pte_get_dirty | ||
194 | #define pte_young hv_pte_get_accessed | ||
195 | #define pte_write hv_pte_get_writable | ||
196 | #define pte_exec hv_pte_get_executable | ||
197 | #define pte_huge hv_pte_get_page | ||
198 | #define pte_rdprotect hv_pte_clear_readable | ||
199 | #define pte_exprotect hv_pte_clear_executable | ||
200 | #define pte_mkclean hv_pte_clear_dirty | ||
201 | #define pte_mkold hv_pte_clear_accessed | ||
202 | #define pte_wrprotect hv_pte_clear_writable | ||
203 | #define pte_mksmall hv_pte_clear_page | ||
204 | #define pte_mkread hv_pte_set_readable | ||
205 | #define pte_mkexec hv_pte_set_executable | ||
206 | #define pte_mkdirty hv_pte_set_dirty | ||
207 | #define pte_mkyoung hv_pte_set_accessed | ||
208 | #define pte_mkwrite hv_pte_set_writable | ||
209 | #define pte_mkhuge hv_pte_set_page | ||
210 | |||
211 | #define pte_special(pte) 0 | ||
212 | #define pte_mkspecial(pte) (pte) | ||
213 | |||
214 | /* | ||
215 | * Use some spare bits in the PTE for user-caching tags. | ||
216 | */ | ||
217 | #define pte_set_forcecache hv_pte_set_client0 | ||
218 | #define pte_get_forcecache hv_pte_get_client0 | ||
219 | #define pte_clear_forcecache hv_pte_clear_client0 | ||
220 | #define pte_set_anyhome hv_pte_set_client1 | ||
221 | #define pte_get_anyhome hv_pte_get_client1 | ||
222 | #define pte_clear_anyhome hv_pte_clear_client1 | ||
223 | |||
224 | /* | ||
225 | * A migrating PTE has PAGE_PRESENT clear but all the other bits preserved. | ||
226 | */ | ||
227 | #define pte_migrating hv_pte_get_migrating | ||
228 | #define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x)) | ||
229 | #define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x)) | ||
230 | |||
231 | #define pte_ERROR(e) \ | ||
232 | pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e)) | ||
233 | #define pgd_ERROR(e) \ | ||
234 | pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
235 | |||
236 | /* | ||
237 | * set_pte_order() sets the given PTE and also sanity-checks the | ||
238 | * requested PTE against the page homecaching. Unspecified parts | ||
239 | * of the PTE are filled in when it is written to memory, i.e. all | ||
240 | * caching attributes if "!forcecache", or the home cpu if "anyhome". | ||
241 | */ | ||
242 | extern void set_pte_order(pte_t *ptep, pte_t pte, int order); | ||
243 | |||
244 | #define set_pte(ptep, pteval) set_pte_order(ptep, pteval, 0) | ||
245 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) | ||
246 | #define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval) | ||
247 | |||
248 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
249 | |||
250 | static inline int pte_none(pte_t pte) | ||
251 | { | ||
252 | return !pte.val; | ||
253 | } | ||
254 | |||
static inline unsigned long pte_pfn(pte_t pte)
{
	/* Extract the page frame number from the hypervisor PTE. */
	return hv_pte_get_pfn(pte);
}
259 | |||
260 | /* Set or get the remote cache cpu in a pgprot with remote caching. */ | ||
261 | extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu); | ||
262 | extern int get_remote_cache_cpu(pgprot_t prot); | ||
263 | |||
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	/* Combine a page frame number with protection bits into a PTE. */
	return hv_pte_set_pfn(prot, pfn);
}
268 | |||
/* Support for priority mappings. */
extern void start_mm_caching(struct mm_struct *mm);
extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);

/*
 * Support non-linear file mappings (see sys_remap_file_pages).
 * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the
 * file offset in the 32 high bits.
 */
#define _PAGE_FILE        HV_PTE_CLIENT1
#define PTE_FILE_MAX_BITS 32
#define pte_file(pte)     (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte))
#define pte_to_pgoff(pte) ((pte).val >> 32)
#define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE })

/*
 * Encode and de-code a swap entry (see <linux/swapops.h>).
 * We put the swap file type+offset in the 32 high bits;
 * I believe we can just leave the low bits clear.
 * Type occupies the low 5 bits of swp_entry_t; offset is the rest.
 */
#define __swp_type(swp)		((swp).val & 0x1f)
#define __swp_offset(swp)	((swp).val >> 5)
#define __swp_entry(type, off)	((swp_entry_t) { (type) | ((off) << 5) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp)	((pte_t) { (((long long) ((swp).val)) << 32) })
294 | |||
295 | /* | ||
296 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | ||
297 | * | ||
298 | * dst - pointer to pgd range anwhere on a pgd page | ||
299 | * src - "" | ||
300 | * count - the number of pgds to copy. | ||
301 | * | ||
302 | * dst and src can be on the same page, but the range must not overlap, | ||
303 | * and must not cross a page boundary. | ||
304 | */ | ||
305 | static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | ||
306 | { | ||
307 | memcpy(dst, src, count * sizeof(pgd_t)); | ||
308 | } | ||
309 | |||
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/* Build a PTE for the given struct page with the given protections. */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
316 | |||
317 | /* | ||
318 | * If we are doing an mprotect(), just accept the new vma->vm_page_prot | ||
319 | * value and combine it with the PFN from the old PTE to get a new PTE. | ||
320 | */ | ||
321 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
322 | { | ||
323 | return pfn_pte(hv_pte_get_pfn(pte), newprot); | ||
324 | } | ||
325 | |||
/*
 * The pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's.
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * A shortcut which implies the use of the kernel's pgd, instead
 * of a process's.
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#if defined(CONFIG_HIGHPTE)
/* With highmem page tables, the L2 page must be kmapped before use. */
extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
#define pte_offset_map(dir, address) \
	_pte_offset_map(dir, address, KM_PTE0)
#define pte_offset_map_nested(dir, address) \
	_pte_offset_map(dir, address, KM_PTE1)
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
/* Page tables are always in lowmem: map/unmap are trivial. */
#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif

/* Clear a non-executable kernel PTE and flush it from the TLB. */
#define kpte_clear_flush(ptep, vaddr)					\
do {									\
	pte_clear(&init_mm, (vaddr), (ptep));				\
	local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE);	\
} while (0)

/*
 * The kernel page tables contain what we need, and we flush when we
 * change specific page table entries.
 */
#define update_mmu_cache(vma, address, pte) do { } while (0)

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */

/* No special handling needed for I/O mappings; plain remap_pfn_range works. */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

extern void vmalloc_sync_all(void);
382 | |||
383 | #endif /* !__ASSEMBLY__ */ | ||
384 | |||
385 | #ifdef __tilegx__ | ||
386 | #include <asm/pgtable_64.h> | ||
387 | #else | ||
388 | #include <asm/pgtable_32.h> | ||
389 | #endif | ||
390 | |||
391 | #ifndef __ASSEMBLY__ | ||
392 | |||
/* Return nonzero iff the pmd entry is empty. */
static inline int pmd_none(pmd_t pmd)
{
	/*
	 * Only check low word on 32-bit platforms, since it might be
	 * out of sync with upper half.
	 */
	return (unsigned long)pmd_val(pmd) == 0;
}
401 | |||
/* Return nonzero iff the pmd entry has its present bit set. */
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PRESENT;
}
406 | |||
/*
 * Return nonzero iff the pmd entry does not look like a valid
 * page-table pointer (its control bits differ from _PAGE_TABLE).
 */
static inline int pmd_bad(pmd_t pmd)
{
	return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE);
}
411 | |||
412 | static inline unsigned long pages_to_mb(unsigned long npg) | ||
413 | { | ||
414 | return npg >> (20 - PAGE_SHIFT); | ||
415 | } | ||
416 | |||
417 | /* | ||
418 | * The pmd can be thought of an array like this: pmd_t[PTRS_PER_PMD] | ||
419 | * | ||
420 | * This function returns the index of the entry in the pmd which would | ||
421 | * control the given virtual address. | ||
422 | */ | ||
423 | static inline unsigned long pmd_index(unsigned long address) | ||
424 | { | ||
425 | return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); | ||
426 | } | ||
427 | |||
/*
 * A given kernel pmd_t maps to a specific virtual address (either a
 * kernel huge page or a kernel pte_t table).  Since kernel pte_t
 * tables can be aligned at sub-page granularity, this function can
 * return non-page-aligned pointers, despite its name.
 */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	/* Widen to phys_addr_t BEFORE shifting so high bits survive. */
	phys_addr_t pa =
		(phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN;
	return (unsigned long)__va(pa);
}
440 | |||
/*
 * A pmd_t points to the base of a huge page or to a pte_t array.
 * If a pte_t array, since we can have multiple per page, we don't
 * have a one-to-one mapping of pmd_t's to pages.  However, this is
 * OK for pte_lockptr(), since we just end up with potentially one
 * lock being used for several pte_t arrays.
 */
#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd)))
449 | |||
450 | /* | ||
451 | * The pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] | ||
452 | * | ||
453 | * This macro returns the index of the entry in the pte page which would | ||
454 | * control the given virtual address. | ||
455 | */ | ||
456 | static inline unsigned long pte_index(unsigned long address) | ||
457 | { | ||
458 | return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); | ||
459 | } | ||
460 | |||
461 | static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) | ||
462 | { | ||
463 | return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); | ||
464 | } | ||
465 | |||
/* Return nonzero iff the pmd entry maps a huge page directly. */
static inline int pmd_huge_page(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_HUGE_PAGE;
}
470 | |||
471 | #include <asm-generic/pgtable.h> | ||
472 | |||
/* Support /proc/NN/pgtable API. */
struct seq_file;
int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
			   unsigned long vaddr, pte_t *ptep, void **datap);
477 | |||
478 | #endif /* !__ASSEMBLY__ */ | ||
479 | |||
480 | #endif /* _ASM_TILE_PGTABLE_H */ | ||
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h new file mode 100644 index 000000000000..53ec34884744 --- /dev/null +++ b/arch/tile/include/asm/pgtable_32.h | |||
@@ -0,0 +1,129 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_PGTABLE_32_H | ||
17 | #define _ASM_TILE_PGTABLE_32_H | ||
18 | |||
/*
 * The level-1 index is defined by the huge page size.  A PGD is composed
 * of PTRS_PER_PGD pgd_t's and is the top level of the page table.
 */
#define PGDIR_SHIFT	HV_LOG2_PAGE_SIZE_LARGE
#define PGDIR_SIZE	HV_PAGE_SIZE_LARGE
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
/* Enough level-1 entries to span the full 32-bit address space. */
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

/*
 * The level-2 index is defined by the difference between the huge
 * page size and the normal page size.  A PTE is composed of
 * PTRS_PER_PTE pte_t's and is the bottom level of the page table.
 * Note that the hypervisor docs use PTE for what we call pte_t, so
 * this nomenclature is somewhat confusing.
 */
#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL))
36 | |||
37 | #ifndef __ASSEMBLY__ | ||
38 | |||
/*
 * Right now we initialize only a single pte table.  It can be extended
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 *
 * HOWEVER, if we are using an allocation scheme with slop after the
 * end of the page table (e.g. where our L2 page tables are 2KB but
 * our pages are 64KB and we are allocating via the page allocator)
 * we can't extend it easily.
 */
#define LAST_PKMAP PTRS_PER_PTE

/* Base of the persistent-kmap area, aligned down to a pgd boundary. */
#define PKMAP_BASE   ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK)

#ifdef CONFIG_HIGHMEM
# define __VMAPPING_END	(PKMAP_BASE & ~(HPAGE_SIZE-1))
#else
# define __VMAPPING_END	(FIXADDR_START & ~(HPAGE_SIZE-1))
#endif

#ifdef CONFIG_HUGEVMAP
/* Carve a huge-page vmap region out of the top of the vmalloc space. */
#define HUGE_VMAP_END	__VMAPPING_END
#define HUGE_VMAP_BASE	(HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE)
#define _VMALLOC_END	HUGE_VMAP_BASE
#else
#define _VMALLOC_END	__VMAPPING_END
#endif

/*
 * Align the vmalloc area to an L2 page table, and leave a guard page
 * at the beginning and end.  The vmalloc code also puts in an internal
 * guard page between each allocation.
 */
#define VMALLOC_END	(_VMALLOC_END - PAGE_SIZE)
extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
#define _VMALLOC_START	(_VMALLOC_END - VMALLOC_RESERVE)
#define VMALLOC_START	(_VMALLOC_START + PAGE_SIZE)

/* This is the maximum possible amount of lowmem. */
#define MAXMEM		(_VMALLOC_START - PAGE_OFFSET)

/* We have no pmd or pud since we are strictly a two-level page table */
#include <asm-generic/pgtable-nopmd.h>
82 | |||
/* We don't define any pgds for these addresses. */
static inline int pgd_addr_invalid(unsigned long addr)
{
	/* Everything at or above the hypervisor interrupt area is off-limits. */
	return addr >= MEM_HV_INTRPT;
}
88 | |||
89 | /* | ||
90 | * Provide versions of these routines that can be used safely when | ||
91 | * the hypervisor may be asynchronously modifying dirty/accessed bits. | ||
92 | * ptep_get_and_clear() matches the generic one but we provide it to | ||
93 | * be parallel with the 64-bit code. | ||
94 | */ | ||
95 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
96 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
97 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
98 | |||
99 | extern int ptep_test_and_clear_young(struct vm_area_struct *, | ||
100 | unsigned long addr, pte_t *); | ||
101 | extern void ptep_set_wrprotect(struct mm_struct *, | ||
102 | unsigned long addr, pte_t *); | ||
103 | |||
104 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
105 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | ||
106 | unsigned long addr, pte_t *ptep) | ||
107 | { | ||
108 | pte_t pte = *ptep; | ||
109 | pte_clear(_mm, addr, ptep); | ||
110 | return pte; | ||
111 | } | ||
112 | |||
/* Create a pmd from a PTFN. */
static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
{
	/* A pmd wraps a pud which wraps a pgd, hence the double braces. */
	return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } };
}

/* Return the page-table frame number (ptfn) that a pmd_t points at. */
#define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd)

/* Zero out a pmd entry (via its underlying pgd representation). */
static inline void pmd_clear(pmd_t *pmdp)
{
	__pte_clear(&pmdp->pud.pgd);
}
126 | |||
127 | #endif /* __ASSEMBLY__ */ | ||
128 | |||
129 | #endif /* _ASM_TILE_PGTABLE_32_H */ | ||
diff --git a/arch/tile/include/asm/poll.h b/arch/tile/include/asm/poll.h new file mode 100644 index 000000000000..c98509d3149e --- /dev/null +++ b/arch/tile/include/asm/poll.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/poll.h> | |||
diff --git a/arch/tile/include/asm/posix_types.h b/arch/tile/include/asm/posix_types.h new file mode 100644 index 000000000000..22cae6230ceb --- /dev/null +++ b/arch/tile/include/asm/posix_types.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/posix_types.h> | |||
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h new file mode 100644 index 000000000000..d942d09b252e --- /dev/null +++ b/arch/tile/include/asm/processor.h | |||
@@ -0,0 +1,338 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PROCESSOR_H | ||
16 | #define _ASM_TILE_PROCESSOR_H | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | |||
20 | /* | ||
21 | * NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one | ||
22 | * normally would, due to #include dependencies. | ||
23 | */ | ||
24 | #include <linux/types.h> | ||
25 | #include <asm/ptrace.h> | ||
26 | #include <asm/percpu.h> | ||
27 | |||
28 | #include <arch/chip.h> | ||
29 | #include <arch/spr_def.h> | ||
30 | |||
struct task_struct;
struct thread_struct;

/* Per-thread address-limit cookie used by the uaccess routines. */
typedef struct {
	unsigned long seg;
} mm_segment_t;

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
void *current_text_addr(void);
43 | |||
#if CHIP_HAS_TILE_DMA()
/* Capture the state of a suspended DMA. */
struct tile_dma_state {
	int enabled;			/* was the DMA engine running? */
	unsigned long src;		/* saved source address */
	unsigned long dest;		/* saved destination address */
	unsigned long strides;
	unsigned long chunk_size;
	unsigned long src_chunk;
	unsigned long dest_chunk;
	unsigned long byte;
	unsigned long status;		/* saved DMA status register */
};

/*
 * A mask of the DMA status register for selecting only the 'running'
 * and 'done' bits.
 */
#define DMA_STATUS_MASK \
	(SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK)
#endif
65 | |||
/*
 * Track asynchronous TLB events (faults and access violations)
 * that occur while we are in kernel mode from DMA or the SN processor.
 */
struct async_tlb {
	short fault_num;         /* original fault number; 0 if none */
	char is_fault;           /* was it a fault (vs an access violation) */
	char is_write;           /* for fault: was it caused by a write? */
	unsigned long address;   /* what address faulted? */
};

#ifdef CONFIG_HARDWALL
struct hardwall_info;
#endif
80 | |||
/* Architecture-specific per-thread state saved across context switches. */
struct thread_struct {
	/* kernel stack pointer */
	unsigned long ksp;
	/* kernel PC */
	unsigned long pc;
	/* starting user stack pointer (for page migration) */
	unsigned long usp0;
	/* pid of process that created this one */
	pid_t creator_pid;
#if CHIP_HAS_TILE_DMA()
	/* DMA info for suspended threads (byte == 0 means no DMA state) */
	struct tile_dma_state tile_dma_state;
#endif
	/* User EX_CONTEXT registers */
	unsigned long ex_context[2];
	/* User SYSTEM_SAVE registers */
	unsigned long system_save[4];
	/* User interrupt mask */
	unsigned long long interrupt_mask;
	/* User interrupt-control 0 state */
	unsigned long intctrl_0;
#if CHIP_HAS_PROC_STATUS_SPR()
	/* Any other miscellaneous processor state bits */
	unsigned long proc_status;
#endif
#ifdef CONFIG_HARDWALL
	/* Is this task tied to an activated hardwall? */
	struct hardwall_info *hardwall;
	/* Chains this task into the list at hardwall->list. */
	struct list_head hardwall_list;
#endif
#if CHIP_HAS_TILE_DMA()
	/* Async DMA TLB fault information */
	struct async_tlb dma_async_tlb;
#endif
#if CHIP_HAS_SN_PROC()
	/* Was static network processor when we were switched out? */
	int sn_proc_running;
	/* Async SNI TLB fault information */
	struct async_tlb sn_async_tlb;
#endif
};
123 | |||
124 | #endif /* !__ASSEMBLY__ */ | ||
125 | |||
/*
 * Start with "sp" this many bytes below the top of the kernel stack.
 * This preserves the invariant that a called function may write to *sp.
 */
#define STACK_TOP_DELTA 8

/*
 * When entering the kernel via a fault, start with the top of the
 * pt_regs structure this many bytes below the top of the page.
 * This aligns the pt_regs structure optimally for cache-line access.
 */
#ifdef __tilegx__
#define KSTK_PTREGS_GAP  48
#else
#define KSTK_PTREGS_GAP  56
#endif

#ifndef __ASSEMBLY__

#ifdef __tilegx__
#define TASK_SIZE_MAX		(MEM_LOW_END + 1)
#else
#define TASK_SIZE_MAX		PAGE_OFFSET
#endif

/* TASK_SIZE and related variables are always checked in "current" context. */
#ifdef CONFIG_COMPAT
#define COMPAT_TASK_SIZE	(1UL << 31)
#define TASK_SIZE		((current_thread_info()->status & TS_COMPAT) ?\
				 COMPAT_TASK_SIZE : TASK_SIZE_MAX)
#else
#define TASK_SIZE		TASK_SIZE_MAX
#endif

/* We provide a minimal "vdso" a la x86; just the sigreturn code for now. */
#define VDSO_BASE		(TASK_SIZE - PAGE_SIZE)

/* User stacks grow down from just below the vdso page. */
#define STACK_TOP		VDSO_BASE

/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
#define STACK_TOP_MAX		TASK_SIZE_MAX

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's, if it is using bottom-up mapping.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

/* Initial thread state: top of init stack, all user interrupts masked. */
#define INIT_THREAD {                                                   \
	.ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \
	.interrupt_mask = -1ULL                                         \
}

/* Kernel stack top for the task that first boots on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_sp);

/* PC to boot from on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_pc);
186 | |||
187 | /* Do necessary setup to start up a newly executed thread. */ | ||
188 | static inline void start_thread(struct pt_regs *regs, | ||
189 | unsigned long pc, unsigned long usp) | ||
190 | { | ||
191 | regs->pc = pc; | ||
192 | regs->sp = usp; | ||
193 | } | ||
194 | |||
/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
	/* Nothing for now */
}

/* Prepare to copy thread state - unlazy all lazy status. */
#define prepare_to_copy(tsk)	do { } while (0)

extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
205 | |||
206 | |||
/*
 * Return saved (kernel) PC of a blocked thread.
 * Only used in a printk() in kernel/sched.c, so don't work too hard.
 */
#define thread_saved_pc(t)   ((t)->thread.pc)

unsigned long get_wchan(struct task_struct *p);

/* Return initial ksp value for given task. */
#define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE)

/* Return some info about the user process TASK. */
#define KSTK_TOP(task)	(task_ksp0(task) - STACK_TOP_DELTA)
/* pt_regs live KSTK_PTREGS_GAP bytes below the kernel stack top. */
#define task_pt_regs(task) \
  ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
#define task_sp(task)	(task_pt_regs(task)->sp)
#define task_pc(task)	(task_pt_regs(task)->pc)
/* Aliases for pc and sp (used in fs/proc/array.c) */
#define KSTK_EIP(task)	task_pc(task)
#define KSTK_ESP(task)	task_sp(task)

/* Standard format for printing registers and other word-size data. */
#ifdef __tilegx__
# define REGFMT "0x%016lx"
#else
# define REGFMT "0x%08lx"
#endif
234 | |||
/*
 * Do some slow action (e.g. read a slow SPR).
 * Note that this must also have compiler-barrier semantics since
 * it may be used in a busy loop reading memory.
 */
static inline void cpu_relax(void)
{
	/* SPR_PASS is a slow no-op read; gives the pipeline a breather. */
	__insn_mfspr(SPR_PASS);
	barrier();
}
245 | |||
struct siginfo;
extern void arch_coredump_signal(struct siginfo *, struct pt_regs *);
#define arch_coredump_signal arch_coredump_signal

/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;

/* Provide information about the chip model. */
extern char chip_model[64];

/* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[];


/* Do we dump information to the console when a user application crashes? */
extern int show_crashinfo;

#if CHIP_HAS_CBOX_HOME_MAP()
/* Does the heap allocator return hash-for-home pages by default? */
extern int hash_default;

/* Should kernel stack pages be hash-for-home? */
extern int kstack_hash;

/* Does MAP_ANONYMOUS return hash-for-home pages by default? */
#define uheap_hash hash_default

#else
/* No hash-for-home support on this chip; everything is plain-cached. */
#define hash_default 0
#define kstack_hash 0
#define uheap_hash 0
#endif

/* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge;

/* Prefetch a full L2 cache line at a time. */
#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()
284 | |||
#else /* __ASSEMBLY__ */

/* Do some slow action (e.g. read a slow SPR). */
#define CPU_RELAX	mfspr zero, SPR_PASS

#endif /* !__ASSEMBLY__ */

/* Assembly code assumes that the PL is in the low bits. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0
# error Fix assembly assumptions about PL
#endif

/* We sometimes use these macros for EX_CONTEXT_0_1 as well. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \
    SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \
    SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \
    SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK
# error Fix assumptions that EX1 macros work for both PL0 and PL1
#endif

/* Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT. */
#define EX1_PL(ex1) \
	(((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK)
#define EX1_ICS(ex1) \
	(((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK)
#define PL_ICS_EX1(pl, ics) \
	(((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \
	 ((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT))

/*
 * Provide symbolic constants for PLs.
 * Note that assembly code assumes that USER_PL is zero.
 */
#define USER_PL 0
#define KERNEL_PL 1

/* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */
#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)
#if CONFIG_NR_CPUS > CPU_MASK_VALUE
# error Too many cpus!
#endif
/* Extract the cpu number from the low bits of SYSTEM_SAVE_1_0. */
#define raw_smp_processor_id() \
	((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE)
/* Extract the ksp0 value (the high, mask-aligned bits). */
#define get_current_ksp0() \
	(__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE)
/* Combine a task's ksp0 with this cpu's id; ksp0 must be mask-aligned. */
#define next_current_ksp0(task) ({ \
	unsigned long __ksp0 = task_ksp0(task); \
	int __cpu = raw_smp_processor_id(); \
	BUG_ON(__ksp0 & CPU_MASK_VALUE); \
	__ksp0 | __cpu; \
})

#endif /* _ASM_TILE_PROCESSOR_H */
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h new file mode 100644 index 000000000000..acdae814e016 --- /dev/null +++ b/arch/tile/include/asm/ptrace.h | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PTRACE_H | ||
16 | #define _ASM_TILE_PTRACE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | #include <arch/abi.h> | ||
20 | |||
/* These must match struct pt_regs, below. */
#if CHIP_WORD_SIZE() == 32
#define PTREGS_OFFSET_REG(n)    ((n)*4)
#else
#define PTREGS_OFFSET_REG(n)    ((n)*8)
#endif
#define PTREGS_OFFSET_BASE      0
/* tp/sp/lr alias regs[53..55]; pc and later are the special registers. */
#define PTREGS_OFFSET_TP        PTREGS_OFFSET_REG(53)
#define PTREGS_OFFSET_SP        PTREGS_OFFSET_REG(54)
#define PTREGS_OFFSET_LR        PTREGS_OFFSET_REG(55)
#define PTREGS_NR_GPRS          56
#define PTREGS_OFFSET_PC        PTREGS_OFFSET_REG(56)
#define PTREGS_OFFSET_EX1       PTREGS_OFFSET_REG(57)
#define PTREGS_OFFSET_FAULTNUM  PTREGS_OFFSET_REG(58)
#define PTREGS_OFFSET_ORIG_R0   PTREGS_OFFSET_REG(59)
#define PTREGS_OFFSET_FLAGS     PTREGS_OFFSET_REG(60)
#if CHIP_HAS_CMPEXCH()
#define PTREGS_OFFSET_CMPEXCH   PTREGS_OFFSET_REG(61)
#endif
/* Total size is padded to 64 register slots for stack alignment. */
#define PTREGS_SIZE             PTREGS_OFFSET_REG(64)
41 | |||
#ifndef __ASSEMBLY__

#ifdef __KERNEL__
/* Benefit from consistent use of "long" on all chips. */
typedef unsigned long pt_reg_t;
#else
/* Provide appropriate length type to userspace regardless of -m32/-m64. */
typedef uint_reg_t pt_reg_t;
#endif

/*
 * This struct defines the way the registers are stored on the stack during a
 * system call/exception.  It should be a multiple of 8 bytes to preserve
 * normal stack alignment rules.
 *
 * Must track <sys/ucontext.h> and <sys/procfs.h>
 */
struct pt_regs {
	/* Saved main processor registers; 56..63 are special. */
	/* tp, sp, and lr must immediately follow regs[] for aliasing. */
	pt_reg_t regs[53];
	pt_reg_t tp;		/* aliases regs[TREG_TP] */
	pt_reg_t sp;		/* aliases regs[TREG_SP] */
	pt_reg_t lr;		/* aliases regs[TREG_LR] */

	/* Saved special registers. */
	pt_reg_t pc;		/* stored in EX_CONTEXT_1_0 */
	pt_reg_t ex1;		/* stored in EX_CONTEXT_1_1 (PL and ICS bit) */
	pt_reg_t faultnum;	/* fault number (INT_SWINT_1 for syscall) */
	pt_reg_t orig_r0;	/* r0 at syscall entry, else zero */
	pt_reg_t flags;		/* flags (see below) */
#if !CHIP_HAS_CMPEXCH()
	pt_reg_t pad[3];
#else
	pt_reg_t cmpexch;	/* value of CMPEXCH_VALUE SPR at interrupt */
	pt_reg_t pad[2];
#endif
};

#endif /* __ASSEMBLY__ */
80 | |||
81 | #endif /* __ASSEMBLY__ */ | ||
82 | |||
/* Flag bits in pt_regs.flags */
#define PT_FLAGS_DISABLE_IRQ    1  /* on return to kernel, disable irqs */
#define PT_FLAGS_CALLER_SAVES   2  /* caller-save registers are valid */
#define PT_FLAGS_RESTORE_REGS   4  /* restore callee-save regs on return */

/* Arch-specific ptrace requests for whole-register-set access. */
#define PTRACE_GETREGS          12
#define PTRACE_SETREGS          13
#define PTRACE_GETFPREGS        14
#define PTRACE_SETFPREGS        15

/* Support TILE-specific ptrace options, with events starting at 16. */
#define PTRACE_O_TRACEMIGRATE   0x00010000
#define PTRACE_EVENT_MIGRATE    16
#ifdef __KERNEL__
#define PTRACE_O_MASK_TILE      (PTRACE_O_TRACEMIGRATE)
#define PT_TRACE_MIGRATE        0x00080000
#define PT_TRACE_MASK_TILE      (PT_TRACE_MIGRATE)
#endif
101 | |||
102 | #ifdef __KERNEL__ | ||
103 | |||
#ifndef __ASSEMBLY__

#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)

/* Does the process account for user or for system time? */
#define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL)

/* Fill in a struct pt_regs with the current kernel registers. */
struct pt_regs *get_pt_regs(struct pt_regs *);

/* Trace the current syscall. */
extern void do_syscall_trace(void);

extern void show_regs(struct pt_regs *);

#define arch_has_single_step()	(1)

/*
 * A structure for all single-stepper state.
 *
 * Also update defines in assembler section if it changes
 */
struct single_step_state {
	/* the page to which we will write hacked-up bundles */
	void __user *buffer;

	union {
		int flags;
		struct {
			unsigned long is_enabled:1, update:1, update_reg:6;
		};
	};

	unsigned long orig_pc;		/* the original PC */
	unsigned long next_pc;		/* return PC if no branch (PC + 1) */
	unsigned long branch_next_pc;	/* return PC if we did branch/jump */
	unsigned long update_value;	/* value to restore to update_target */
};

/* Single-step the instruction at regs->pc */
extern void single_step_once(struct pt_regs *regs);

struct task_struct;

extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code);

#ifdef __tilegx__
/* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */
#define __ARCH_WANT_COMPAT_SYS_PTRACE
#endif

#endif /* !__ASSEMBLY__ */

/* Assembler-visible views of the single_step_state flag bits above. */
#define SINGLESTEP_STATE_MASK_IS_ENABLED 0x1
#define SINGLESTEP_STATE_MASK_UPDATE 0x2
#define SINGLESTEP_STATE_TARGET_LB 2
#define SINGLESTEP_STATE_TARGET_UB 7

#endif /* __KERNEL__ */

#endif /* _ASM_TILE_PTRACE_H */
diff --git a/arch/tile/include/asm/resource.h b/arch/tile/include/asm/resource.h new file mode 100644 index 000000000000..04bc4db8921b --- /dev/null +++ b/arch/tile/include/asm/resource.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/resource.h> | |||
diff --git a/arch/tile/include/asm/scatterlist.h b/arch/tile/include/asm/scatterlist.h new file mode 100644 index 000000000000..c5604242c0d5 --- /dev/null +++ b/arch/tile/include/asm/scatterlist.h | |||
@@ -0,0 +1,22 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SCATTERLIST_H
#define _ASM_TILE_SCATTERLIST_H

/* ~0UL: tile imposes no ISA-style addressing limit on DMA. */
#define ISA_DMA_THRESHOLD (~0UL)

#include <asm-generic/scatterlist.h>

#endif /* _ASM_TILE_SCATTERLIST_H */
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h new file mode 100644 index 000000000000..d062d463fca9 --- /dev/null +++ b/arch/tile/include/asm/sections.h | |||
@@ -0,0 +1,44 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SECTIONS_H
#define _ASM_TILE_SECTIONS_H

/* Tell <asm-generic/sections.h> we provide our own arch_is_kernel_data(). */
#define arch_is_kernel_data arch_is_kernel_data

#include <asm-generic/sections.h>

/* Text and data are at different areas in the kernel VA space. */
extern char _sinitdata[], _einitdata[];

/* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[];


/* Not exactly sections, but PC comparison points in the code. */
extern char __rt_sigreturn[], __rt_sigreturn_end[];
#ifndef __tilegx__
extern char sys_cmpxchg[], __sys_cmpxchg_end[];
extern char __sys_cmpxchg_grab_lock[];
extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
#endif
37 | /* Handle the discontiguity between _sdata and _stext. */ | ||
38 | static inline int arch_is_kernel_data(unsigned long addr) | ||
39 | { | ||
40 | return addr >= (unsigned long)_sdata && | ||
41 | addr < (unsigned long)_end; | ||
42 | } | ||
43 | |||
44 | #endif /* _ASM_TILE_SECTIONS_H */ | ||
diff --git a/arch/tile/include/asm/sembuf.h b/arch/tile/include/asm/sembuf.h new file mode 100644 index 000000000000..7673b83cfef7 --- /dev/null +++ b/arch/tile/include/asm/sembuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/sembuf.h> | |||
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h new file mode 100644 index 000000000000..823ddd47ff6e --- /dev/null +++ b/arch/tile/include/asm/setup.h | |||
@@ -0,0 +1,32 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SETUP_H
#define _ASM_TILE_SETUP_H

#include <linux/pfn.h>
#include <linux/init.h>

/*
 * Reserved space for vmalloc and iomap - defined in asm/page.h
 */
/* Page-frame number corresponding to the MAXMEM direct-map limit. */
#define MAXMEM_PFN PFN_DOWN(MAXMEM)

/* Maximum length of the kernel boot command line. */
#define COMMAND_LINE_SIZE 2048

/* printf-style panic usable during early boot. */
void early_panic(const char *fmt, ...);
void warn_early_printk(void);
void __init disable_early_printk(void);

#endif /* _ASM_TILE_SETUP_H */
diff --git a/arch/tile/include/asm/shmbuf.h b/arch/tile/include/asm/shmbuf.h new file mode 100644 index 000000000000..83c05fc2de38 --- /dev/null +++ b/arch/tile/include/asm/shmbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/shmbuf.h> | |||
diff --git a/arch/tile/include/asm/shmparam.h b/arch/tile/include/asm/shmparam.h new file mode 100644 index 000000000000..93f30deb95d0 --- /dev/null +++ b/arch/tile/include/asm/shmparam.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/shmparam.h> | |||
diff --git a/arch/tile/include/asm/sigcontext.h b/arch/tile/include/asm/sigcontext.h new file mode 100644 index 000000000000..7cd7672e3ad4 --- /dev/null +++ b/arch/tile/include/asm/sigcontext.h | |||
@@ -0,0 +1,27 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SIGCONTEXT_H
#define _ASM_TILE_SIGCONTEXT_H

/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
#include <asm/ptrace.h>

/* Must track <sys/ucontext.h> */

/* Machine register state saved across signal delivery: just pt_regs here. */
struct sigcontext {
	struct pt_regs regs;
};

#endif /* _ASM_TILE_SIGCONTEXT_H */
diff --git a/arch/tile/include/asm/sigframe.h b/arch/tile/include/asm/sigframe.h new file mode 100644 index 000000000000..994d3d30205f --- /dev/null +++ b/arch/tile/include/asm/sigframe.h | |||
@@ -0,0 +1,33 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SIGFRAME_H
#define _ASM_TILE_SIGFRAME_H

/* Indicate that syscall return should not examine r0 */
#define INT_SWINT_1_SIGRETURN (~0)

#ifndef __ASSEMBLY__

#include <arch/abi.h>

/* Layout of the rt signal frame pushed on the user stack. */
struct rt_sigframe {
	unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */
	struct siginfo info;	/* signal details delivered to the handler */
	struct ucontext uc;	/* saved context to restore at sigreturn */
};

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_SIGFRAME_H */
diff --git a/arch/tile/include/asm/siginfo.h b/arch/tile/include/asm/siginfo.h new file mode 100644 index 000000000000..0c12d1b9ddf2 --- /dev/null +++ b/arch/tile/include/asm/siginfo.h | |||
@@ -0,0 +1,30 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SIGINFO_H
#define _ASM_TILE_SIGINFO_H

#define __ARCH_SI_TRAPNO

#include <asm-generic/siginfo.h>

/*
 * Additional Tile-specific SIGILL si_codes
 */
#define ILL_DBLFLT (__SI_FAULT|9) /* double fault */
#define ILL_HARDWALL (__SI_FAULT|10) /* user networks hardwall violation */
/* Raise NSIGILL to cover the two extra codes defined above. */
#undef NSIGILL
#define NSIGILL 10

#endif /* _ASM_TILE_SIGINFO_H */
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h new file mode 100644 index 000000000000..eb0253f32202 --- /dev/null +++ b/arch/tile/include/asm/signal.h | |||
@@ -0,0 +1,32 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SIGNAL_H
#define _ASM_TILE_SIGNAL_H

/* Do not notify a ptracer when this signal is handled. */
#define SA_NOPTRACE 0x02000000u

/* Used in earlier Tilera releases, so keeping for binary compatibility. */
#define SA_RESTORER 0x04000000u

#include <asm-generic/signal.h>

#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
/* Helpers to save/restore machine state across signal delivery. */
int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *);
int setup_sigcontext(struct sigcontext __user *, struct pt_regs *);
void do_signal(struct pt_regs *regs);
#endif

#endif /* _ASM_TILE_SIGNAL_H */
diff --git a/arch/tile/include/asm/smp.h b/arch/tile/include/asm/smp.h new file mode 100644 index 000000000000..532124ae4b12 --- /dev/null +++ b/arch/tile/include/asm/smp.h | |||
@@ -0,0 +1,147 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SMP_H
#define _ASM_TILE_SMP_H

#ifdef CONFIG_SMP

#include <asm/processor.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <hv/hypervisor.h>

/* Set up this tile to support receiving hypervisor messages */
void init_messaging(void);

/* Set up this tile to support receiving device interrupts and IPIs. */
void init_per_tile_IRQs(void);

/* Send a message to processors specified in mask */
void send_IPI_many(const struct cpumask *mask, int tag);

/* Send a message to all but the sending processor */
void send_IPI_allbutself(int tag);

/* Send a message to a specific processor */
void send_IPI_single(int dest, int tag);

/* Process an IPI message */
void evaluate_message(int tag);

/* Boot a secondary cpu */
void online_secondary(void);

/* Call a function on a specified set of CPUs (may include this one). */
extern void on_each_cpu_mask(const struct cpumask *mask,
			     void (*func)(void *), void *info, bool wait);

/* Topology of the supervisor tile grid, and coordinates of boot processor */
extern HV_Topology smp_topology;

/* Accessors for grid size */
#define smp_height (smp_topology.height)
#define smp_width (smp_topology.width)

/*
 * Convenience functions for converting cpu <-> coords.
 * Linear cpu numbers are assigned row-major across the grid.
 */
static inline int cpu_x(int cpu)
{
	return cpu % smp_width;
}
static inline int cpu_y(int cpu)
{
	return cpu / smp_width;
}
static inline int xy_to_cpu(int x, int y)
{
	return y * smp_width + x;
}

/* Hypervisor message tags sent via the tile send_IPI*() routines. */
#define MSG_TAG_START_CPU 1
#define MSG_TAG_STOP_CPU 2
#define MSG_TAG_CALL_FUNCTION_MANY 3
#define MSG_TAG_CALL_FUNCTION_SINGLE 4

/* Hook for the generic smp_call_function_many() routine. */
static inline void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_IPI_many(mask, MSG_TAG_CALL_FUNCTION_MANY);
}

/* Hook for the generic smp_call_function_single() routine. */
static inline void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, MSG_TAG_CALL_FUNCTION_SINGLE);
}

/* Print out the boot string describing which cpus were disabled. */
void print_disabled_cpus(void);

#else /* !CONFIG_SMP */

/* UP fallback: call func locally iff cpu 0 is in the mask. */
#define on_each_cpu_mask(mask, func, info, wait) \
	do { if (cpumask_test_cpu(0, (mask))) func(info); } while (0)

/* UP fallback: a 1x1 grid with only cpu 0. */
#define smp_master_cpu 0
#define smp_height 1
#define smp_width 1
#define cpu_x(cpu) 0
#define cpu_y(cpu) 0
#define xy_to_cpu(x, y) 0

#endif /* !CONFIG_SMP */


/* Which cpus may be used as the lotar in a page table entry. */
extern struct cpumask cpu_lotar_map;
#define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map)

#if CHIP_HAS_CBOX_HOME_MAP()
/* Which processors are used for hash-for-home mapping */
extern struct cpumask hash_for_home_map;
#endif

/* Which cpus can have their cache flushed by hv_flush_remote(). */
extern struct cpumask cpu_cacheable_map;
#define cpu_cacheable(cpu) cpumask_test_cpu((cpu), &cpu_cacheable_map)

/* Convert an HV_LOTAR value into a cpu. */
static inline int hv_lotar_to_cpu(HV_LOTAR lotar)
{
	return HV_LOTAR_X(lotar) + (HV_LOTAR_Y(lotar) * smp_width);
}

/*
 * Extension of <linux/cpumask.h> functionality when you just want
 * to express a mask or suppression or inclusion region without
 * being too concerned about exactly which cpus are valid in that region.
 */
int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits);

#define cpulist_parse_crop(buf, dst) \
			__cpulist_parse_crop((buf), (dst), NR_CPUS)
static inline int __cpulist_parse_crop(const char *buf, struct cpumask *dstp,
					int nbits)
{
	return bitmap_parselist_crop(buf, cpumask_bits(dstp), nbits);
}

/* Initialize the IPI subsystem. */
void ipi_init(void);

/* Function for start-cpu message to cause us to jump to. */
extern unsigned long start_cpu_function_addr;

#endif /* _ASM_TILE_SMP_H */
diff --git a/arch/tile/include/asm/socket.h b/arch/tile/include/asm/socket.h new file mode 100644 index 000000000000..6b71384b9d8b --- /dev/null +++ b/arch/tile/include/asm/socket.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/socket.h> | |||
diff --git a/arch/tile/include/asm/sockios.h b/arch/tile/include/asm/sockios.h new file mode 100644 index 000000000000..def6d4746ee7 --- /dev/null +++ b/arch/tile/include/asm/sockios.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/sockios.h> | |||
diff --git a/arch/tile/include/asm/spinlock.h b/arch/tile/include/asm/spinlock.h new file mode 100644 index 000000000000..1a8bd4740c28 --- /dev/null +++ b/arch/tile/include/asm/spinlock.h | |||
@@ -0,0 +1,24 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SPINLOCK_H
#define _ASM_TILE_SPINLOCK_H

/* Select the 64-bit (tilegx) or 32-bit spinlock implementation. */
#ifdef __tilegx__
#include <asm/spinlock_64.h>
#else
#include <asm/spinlock_32.h>
#endif

#endif /* _ASM_TILE_SPINLOCK_H */
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h new file mode 100644 index 000000000000..88efdde8dd2b --- /dev/null +++ b/arch/tile/include/asm/spinlock_32.h | |||
@@ -0,0 +1,199 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * 32-bit SMP spinlocks.
 */

#ifndef _ASM_TILE_SPINLOCK_32_H
#define _ASM_TILE_SPINLOCK_32_H

#include <asm/atomic.h>
#include <asm/page.h>
#include <asm/system.h>
#include <linux/compiler.h>

/*
 * We only use even ticket numbers so the '1' inserted by a tns is
 * an unambiguous "ticket is busy" flag.
 */
#define TICKET_QUANTUM 2


/*
 * SMP ticket spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	/*
	 * Note that even if a new ticket is in the process of being
	 * acquired, so lock->next_ticket is 1, it's still reasonable
	 * to claim the lock is held, since it will be momentarily
	 * if not already. There's no need to wait for a "valid"
	 * lock->next_ticket to become available.
	 */
	return lock->next_ticket != lock->current_ticket;
}

void arch_spin_lock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

int arch_spin_trylock(arch_spinlock_t *lock);

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* For efficiency, overlap fetching the old ticket with the wmb(). */
	int old_ticket = lock->current_ticket;
	wmb(); /* guarantee anything modified under the lock is visible */
	lock->current_ticket = old_ticket + TICKET_QUANTUM;
}

void arch_spin_unlock_wait(arch_spinlock_t *lock);

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use a "tns/store-back" technique on a single word to manage
 * the lock state, looping around to retry if the tns returns 1.
 */

/* Internal layout of the word; do not use. */
#define _WR_NEXT_SHIFT 8
#define _WR_CURR_SHIFT 16
#define _WR_WIDTH 8
#define _RD_COUNT_SHIFT 24
#define _RD_COUNT_WIDTH 8

/* Internal functions; do not use. */
void arch_read_lock_slow(arch_rwlock_t *, u32);
int arch_read_trylock_slow(arch_rwlock_t *);
void arch_read_unlock_slow(arch_rwlock_t *);
void arch_write_lock_slow(arch_rwlock_t *, u32);
void arch_write_unlock_slow(arch_rwlock_t *, u32);

/**
 * arch_read_can_lock() - would read_trylock() succeed?
 */
static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
	/* Lockable iff every bit below the read count is clear. */
	return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
}

/**
 * arch_write_can_lock() - would write_trylock() succeed?
 */
static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

/**
 * arch_read_lock() - acquire a read lock.
 */
static inline void arch_read_lock(arch_rwlock_t *rwlock)
{
	u32 val = __insn_tns((int *)&rwlock->lock);
	/* Any bit below the read count (tns flag or writer state): go slow. */
	if (unlikely(val << _RD_COUNT_WIDTH)) {
		arch_read_lock_slow(rwlock, val);
		return;
	}
	rwlock->lock = val + (1 << _RD_COUNT_SHIFT);
}

/**
 * arch_write_lock() - acquire a write lock.
 */
static inline void arch_write_lock(arch_rwlock_t *rwlock)
{
	u32 val = __insn_tns((int *)&rwlock->lock);
	if (unlikely(val != 0)) {
		arch_write_lock_slow(rwlock, val);
		return;
	}
	rwlock->lock = 1 << _WR_NEXT_SHIFT;
}

/**
 * arch_read_trylock() - try to acquire a read lock.
 */
static inline int arch_read_trylock(arch_rwlock_t *rwlock)
{
	int locked;
	u32 val = __insn_tns((int *)&rwlock->lock);
	/* val & 1: another cpu's tns is in flight; retry in the slow path. */
	if (unlikely(val & 1))
		return arch_read_trylock_slow(rwlock);
	locked = (val << _RD_COUNT_WIDTH) == 0;
	/* Store back, bumping the read count only if we got the lock. */
	rwlock->lock = val + (locked << _RD_COUNT_SHIFT);
	return locked;
}

/**
 * arch_write_trylock() - try to acquire a write lock.
 */
static inline int arch_write_trylock(arch_rwlock_t *rwlock)
{
	u32 val = __insn_tns((int *)&rwlock->lock);

	/*
	 * If a tns is in progress, or there's a waiting or active locker,
	 * or active readers, we can't take the lock, so give up.
	 */
	if (unlikely(val != 0)) {
		if (!(val & 1))
			rwlock->lock = val;
		return 0;
	}

	/* Set the "next" field to mark it locked. */
	rwlock->lock = 1 << _WR_NEXT_SHIFT;
	return 1;
}

/**
 * arch_read_unlock() - release a read lock.
 */
static inline void arch_read_unlock(arch_rwlock_t *rwlock)
{
	u32 val;
	mb(); /* guarantee anything modified under the lock is visible */
	val = __insn_tns((int *)&rwlock->lock);
	if (unlikely(val & 1)) {
		arch_read_unlock_slow(rwlock);
		return;
	}
	rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
}

/**
 * arch_write_unlock() - release a write lock.
 */
static inline void arch_write_unlock(arch_rwlock_t *rwlock)
{
	u32 val;
	mb(); /* guarantee anything modified under the lock is visible */
	val = __insn_tns((int *)&rwlock->lock);
	/* Anything beyond our own "next" bit means contention: go slow. */
	if (unlikely(val != (1 << _WR_NEXT_SHIFT))) {
		arch_write_unlock_slow(rwlock, val);
		return;
	}
	rwlock->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_32_H */
diff --git a/arch/tile/include/asm/spinlock_types.h b/arch/tile/include/asm/spinlock_types.h new file mode 100644 index 000000000000..a71f59b49c50 --- /dev/null +++ b/arch/tile/include/asm/spinlock_types.h | |||
@@ -0,0 +1,60 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SPINLOCK_TYPES_H
#define _ASM_TILE_SPINLOCK_TYPES_H

#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif

#ifdef __tilegx__

/* Low 15 bits are "next"; high 15 bits are "current". */
typedef struct arch_spinlock {
	unsigned int lock;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }

/* High bit is "writer owns"; low 31 bits are a count of readers. */
typedef struct arch_rwlock {
	unsigned int lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED { 0 }

#else

/* Ticket lock; the operations on it live in <asm/spinlock_32.h>. */
typedef struct arch_spinlock {
	/* Next ticket number to hand out. */
	int next_ticket;
	/* The ticket number that currently owns this lock. */
	int current_ticket;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED { 0, 0 }

/*
 * Byte 0 for tns (only the low bit is used), byte 1 for ticket-lock "next",
 * byte 2 for ticket-lock "current", byte 3 for reader count.
 */
typedef struct arch_rwlock {
	unsigned int lock;
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED { 0 }

#endif
#endif /* _ASM_TILE_SPINLOCK_TYPES_H */
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h new file mode 100644 index 000000000000..f908473c322d --- /dev/null +++ b/arch/tile/include/asm/stack.h | |||
@@ -0,0 +1,74 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_STACK_H
#define _ASM_TILE_STACK_H

#include <linux/types.h>
#include <linux/sched.h>
#include <asm/backtrace.h>
#include <hv/hypervisor.h>

/* Everything we need to keep track of a backtrace iteration */
struct KBacktraceIterator {
	BacktraceIterator it;
	struct task_struct *task;     /* task we are backtracing */
	HV_PTE *pgtable;	      /* page table for user space access */
	int end;		      /* iteration complete. */
	int new_context;              /* new context is starting */
	int profile;                  /* profiling, so stop on async intrpt */
	int verbose;		      /* printk extra info (don't want to
				       * do this for profiling) */
	int is_current;               /* backtracing current task */
};

/* Iteration methods for kernel backtraces */

/*
 * Initialize a KBacktraceIterator from a task_struct, and optionally from
 * a set of registers. If the registers are omitted, the process is
 * assumed to be descheduled, and registers are read from the process's
 * thread_struct and stack. "verbose" means to printk some additional
 * information about fault handlers as we pass them on the stack.
 */
extern void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
				    struct task_struct *, struct pt_regs *);

/* Initialize iterator based on current stack. */
extern void KBacktraceIterator_init_current(struct KBacktraceIterator *kbt);

/*
 * Helper method for above.
 * NOTE(review): pc/lr/sp/r52 appear to seed the initial frame; r52's
 * exact role (frame-pointer-like?) should be confirmed against the
 * backtracer implementation.
 */
extern void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt,
				ulong pc, ulong lr, ulong sp, ulong r52);

/* No more frames? */
extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt);

/* Advance to the next frame. */
extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt);

/*
 * Dump stack given complete register info. Use only from the
 * architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
extern void tile_show_stack(struct KBacktraceIterator *, int headers);

/* Dump stack of current process, with registers to seed the backtrace. */
extern void dump_stack_regs(struct pt_regs *);

/* Helper method for assembly dump_stack(). */
extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);

#endif /* _ASM_TILE_STACK_H */
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h new file mode 100644 index 000000000000..3dc90fa92c70 --- /dev/null +++ b/arch/tile/include/asm/stat.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/stat.h> | |||
diff --git a/arch/tile/include/asm/statfs.h b/arch/tile/include/asm/statfs.h new file mode 100644 index 000000000000..0b91fe198c20 --- /dev/null +++ b/arch/tile/include/asm/statfs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/statfs.h> | |||
diff --git a/arch/tile/include/asm/string.h b/arch/tile/include/asm/string.h new file mode 100644 index 000000000000..7535cf1a30e4 --- /dev/null +++ b/arch/tile/include/asm/string.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
#ifndef _ASM_TILE_STRING_H
#define _ASM_TILE_STRING_H

/*
 * Tell <linux/string.h> which string ops this arch implements itself
 * (optimized versions live elsewhere in arch/tile); the generic C
 * fallbacks are suppressed for these.
 */
#define __HAVE_ARCH_MEMCHR
#define __HAVE_ARCH_MEMSET
#define __HAVE_ARCH_MEMCPY
#define __HAVE_ARCH_MEMMOVE
#define __HAVE_ARCH_STRCHR
#define __HAVE_ARCH_STRLEN

/* Prototypes for the arch-provided implementations (standard semantics). */
extern __kernel_size_t strlen(const char *);
extern char *strchr(const char *s, int c);
extern void *memchr(const void *s, int c, size_t n);
extern void *memset(void *, int, __kernel_size_t);
extern void *memcpy(void *, const void *, __kernel_size_t);
extern void *memmove(void *, const void *, __kernel_size_t);

#endif /* _ASM_TILE_STRING_H */
diff --git a/arch/tile/include/asm/swab.h b/arch/tile/include/asm/swab.h new file mode 100644 index 000000000000..25c686a00f1d --- /dev/null +++ b/arch/tile/include/asm/swab.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
#ifndef _ASM_TILE_SWAB_H
#define _ASM_TILE_SWAB_H

/* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */
#define __arch_swab32(x) __builtin_bswap32(x)
#define __arch_swab64(x) __builtin_bswap64(x)

/*
 * Use the variant that is natural for the wordsize: byte-swap the
 * whole word, then shift the two interesting bytes down to the bottom.
 */
#ifdef CONFIG_64BIT
#define __arch_swab16(x) (__builtin_bswap64(x) >> 48)
#else
#define __arch_swab16(x) (__builtin_bswap32(x) >> 16)
#endif

#endif /* _ASM_TILE_SWAB_H */
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h new file mode 100644 index 000000000000..d35e0dcb67b1 --- /dev/null +++ b/arch/tile/include/asm/syscall.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * See asm-generic/syscall.h for descriptions of what we must do here. | ||
16 | */ | ||
17 | |||
#ifndef _ASM_TILE_SYSCALL_H
#define _ASM_TILE_SYSCALL_H

#include <linux/sched.h>
#include <linux/err.h>
#include <arch/abi.h>

/*
 * Only the low 32 bits of orig_r0 are meaningful, so we return int.
 * This importantly ignores the high bits on 64-bit, so comparisons
 * sign-extend the low 32 bits.
 */
static inline int syscall_get_nr(struct task_struct *t, struct pt_regs *regs)
{
	return regs->regs[TREG_SYSCALL_NR];
}

/* Restore r0 to its value at syscall entry, so the syscall can restart. */
static inline void syscall_rollback(struct task_struct *task,
				    struct pt_regs *regs)
{
	regs->regs[0] = regs->orig_r0;
}

/* Return the error code if r0 holds one (per IS_ERR_VALUE), else 0. */
static inline long syscall_get_error(struct task_struct *task,
				     struct pt_regs *regs)
{
	unsigned long error = regs->regs[0];
	return IS_ERR_VALUE(error) ? error : 0;
}

/* The syscall return value is delivered in r0. */
static inline long syscall_get_return_value(struct task_struct *task,
					    struct pt_regs *regs)
{
	return regs->regs[0];
}

/* Store a return value in r0; a non-zero error code takes priority. */
static inline void syscall_set_return_value(struct task_struct *task,
					    struct pt_regs *regs,
					    int error, long val)
{
	regs->regs[0] = (long) error ?: val;
}
60 | |||
61 | static inline void syscall_get_arguments(struct task_struct *task, | ||
62 | struct pt_regs *regs, | ||
63 | unsigned int i, unsigned int n, | ||
64 | unsigned long *args) | ||
65 | { | ||
66 | BUG_ON(i + n > 6); | ||
67 | memcpy(args, ®s[i], n * sizeof(args[0])); | ||
68 | } | ||
69 | |||
70 | static inline void syscall_set_arguments(struct task_struct *task, | ||
71 | struct pt_regs *regs, | ||
72 | unsigned int i, unsigned int n, | ||
73 | const unsigned long *args) | ||
74 | { | ||
75 | BUG_ON(i + n > 6); | ||
76 | memcpy(®s[i], args, n * sizeof(args[0])); | ||
77 | } | ||
78 | |||
79 | #endif /* _ASM_TILE_SYSCALL_H */ | ||
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h new file mode 100644 index 000000000000..af165a74537f --- /dev/null +++ b/arch/tile/include/asm/syscalls.h | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * syscalls.h - Linux syscall interfaces (arch-specific) | ||
3 | * | ||
4 | * Copyright (c) 2008 Jaswinder Singh Rajput | ||
5 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation, version 2. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for | ||
15 | * more details. | ||
16 | */ | ||
17 | |||
#ifndef _ASM_TILE_SYSCALLS_H
#define _ASM_TILE_SYSCALLS_H

#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/compat.h>

/* The array of function pointers for syscalls. */
extern void *sys_call_table[];
#ifdef CONFIG_COMPAT
extern void *compat_sys_call_table[];
#endif

/*
 * Note that by convention, any syscall which requires the current
 * register set takes an additional "struct pt_regs *" pointer; the
 * sys_xxx() function just adds the pointer and tail-calls to _sys_xxx().
 */

/* kernel/sys.c */
/* 32-bit userspace passes 64-bit offsets/lengths as lo/hi word pairs. */
ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count);
long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
		     u32 len, int advice);
int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
		       u32 len_lo, u32 len_hi, int advice);
long sys_flush_cache(void);
long sys_mmap2(unsigned long addr, unsigned long len,
	       unsigned long prot, unsigned long flags,
	       unsigned long fd, unsigned long pgoff);
#ifdef __tilegx__
long sys_mmap(unsigned long addr, unsigned long len,
	      unsigned long prot, unsigned long flags,
	      unsigned long fd, off_t pgoff);
#endif

/* kernel/process.c */
long sys_clone(unsigned long clone_flags, unsigned long newsp,
	       void __user *parent_tid, void __user *child_tid);
long _sys_clone(unsigned long clone_flags, unsigned long newsp,
		void __user *parent_tid, void __user *child_tid,
		struct pt_regs *regs);
long sys_fork(void);
long _sys_fork(struct pt_regs *regs);
long sys_vfork(void);
long _sys_vfork(struct pt_regs *regs);
long sys_execve(char __user *filename, char __user * __user *argv,
		char __user * __user *envp);
long _sys_execve(char __user *filename, char __user * __user *argv,
		 char __user * __user *envp, struct pt_regs *regs);

/* kernel/signal.c */
long sys_sigaltstack(const stack_t __user *, stack_t __user *);
long _sys_sigaltstack(const stack_t __user *, stack_t __user *,
		      struct pt_regs *);
long sys_rt_sigreturn(void);
long _sys_rt_sigreturn(struct pt_regs *regs);

/* platform-independent functions */
long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
long sys_rt_sigaction(int sig, const struct sigaction __user *act,
		      struct sigaction __user *oact, size_t sigsetsize);

#ifndef __tilegx__
/* mm/fault.c */
int sys_cmpxchg_badaddr(unsigned long address);
int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *);
#endif

#ifdef CONFIG_COMPAT
long compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
		       compat_uptr_t __user *envp);
long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
			compat_uptr_t __user *envp, struct pt_regs *regs);
long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
			    struct compat_sigaltstack __user *uoss_ptr);
long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
			     struct compat_sigaltstack __user *uoss_ptr,
			     struct pt_regs *regs);
long compat_sys_rt_sigreturn(void);
long _compat_sys_rt_sigreturn(struct pt_regs *regs);

/* These four are not defined for 64-bit, but serve as "compat" syscalls. */
long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg);
long sys_fstat64(unsigned long fd, struct stat64 __user *statbuf);
long sys_truncate64(const char __user *path, loff_t length);
long sys_ftruncate64(unsigned int fd, loff_t length);
#endif

#endif /* _ASM_TILE_SYSCALLS_H */
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h new file mode 100644 index 000000000000..f749be327ce0 --- /dev/null +++ b/arch/tile/include/asm/system.h | |||
@@ -0,0 +1,248 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SYSTEM_H | ||
16 | #define _ASM_TILE_SYSTEM_H | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <linux/irqflags.h> | ||
22 | |||
23 | /* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ | ||
24 | #include <asm/ptrace.h> | ||
25 | |||
26 | #include <arch/chip.h> | ||
27 | #include <arch/sim_def.h> | ||
28 | #include <arch/spr_def.h> | ||
29 | |||
30 | /* | ||
31 | * read_barrier_depends - Flush all pending reads that subsequents reads | ||
32 | * depend on. | ||
33 | * | ||
34 | * No data-dependent reads from memory-like regions are ever reordered | ||
35 | * over this barrier. All reads preceding this primitive are guaranteed | ||
36 | * to access memory (but not necessarily other CPUs' caches) before any | ||
37 | * reads following this primitive that depend on the data return by | ||
38 | * any of the preceding reads. This primitive is much lighter weight than | ||
39 | * rmb() on most CPUs, and is never heavier weight than is | ||
40 | * rmb(). | ||
41 | * | ||
42 | * These ordering constraints are respected by both the local CPU | ||
43 | * and the compiler. | ||
44 | * | ||
45 | * Ordering is not guaranteed by anything other than these primitives, | ||
46 | * not even by data dependencies. See the documentation for | ||
47 | * memory_barrier() for examples and URLs to more information. | ||
48 | * | ||
49 | * For example, the following code would force ordering (the initial | ||
50 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
51 | * | ||
52 | * <programlisting> | ||
53 | * CPU 0 CPU 1 | ||
54 | * | ||
55 | * b = 2; | ||
56 | * memory_barrier(); | ||
57 | * p = &b; q = p; | ||
58 | * read_barrier_depends(); | ||
59 | * d = *q; | ||
60 | * </programlisting> | ||
61 | * | ||
62 | * because the read of "*q" depends on the read of "p" and these | ||
63 | * two reads are separated by a read_barrier_depends(). However, | ||
64 | * the following code, with the same initial values for "a" and "b": | ||
65 | * | ||
66 | * <programlisting> | ||
67 | * CPU 0 CPU 1 | ||
68 | * | ||
69 | * a = 2; | ||
70 | * memory_barrier(); | ||
71 | * b = 3; y = b; | ||
72 | * read_barrier_depends(); | ||
73 | * x = a; | ||
74 | * </programlisting> | ||
75 | * | ||
76 | * does not enforce ordering, since there is no data dependency between | ||
77 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
78 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
79 | * in cases like this where there are no data dependencies. | ||
80 | */ | ||
81 | |||
/* No-op: data-dependent reads need no barrier on tile (see comment above). */
#define read_barrier_depends()	do { } while (0)

/* Full memory-fence instruction. */
#define __sync()	__insn_mf()

#if CHIP_HAS_SPLIT_CYCLE()
#define get_cycles_low()	__insn_mfspr(SPR_CYCLE_LOW)
#else
#define get_cycles_low()	__insn_mfspr(SPR_CYCLE)	/* just get all 64 bits */
#endif
91 | |||
/* Fence to guarantee visibility of stores to incoherent memory. */
static inline void
mb_incoherent(void)
{
	__insn_mf();

#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
	{
		/* Out-of-line helper; declared locally to limit header deps. */
		int __mb_incoherent(void);
#if CHIP_HAS_TILE_WRITE_PENDING()
		/*
		 * Briefly poll the TILE_WRITE_PENDING SPR first: if the
		 * pending writes drain within the timeout we can skip
		 * the __mb_incoherent() call entirely.
		 */
		const unsigned long WRITE_TIMEOUT_CYCLES = 400;
		unsigned long start = get_cycles_low();
		do {
			if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0)
				return;
		} while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES);
#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
		(void) __mb_incoherent();
	}
#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
}
113 | |||
/* All of the "fast" barriers map to the single fence instruction. */
#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()	mb_incoherent()

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

/* On UP, SMP barriers degrade to compiler barriers only. */
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

/* Assign, then fence: the store is globally visible before return. */
#define set_mb(var, value) \
	do { var = value; mb(); } while (0)
138 | |||
/*
 * Pause the DMA engine and static network before task switching.
 */
#define prepare_arch_switch(next) _prepare_arch_switch(next)
void _prepare_arch_switch(struct task_struct *next);


/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 * The number of callee-saved registers saved on the kernel stack
 * is defined here for use in copy_thread() and must agree with
 * __switch_to().  (These two constants are visible to assembly too,
 * hence the brief excursion outside !__ASSEMBLY__.)
 */
#endif /* !__ASSEMBLY__ */
#define CALLEE_SAVED_FIRST_REG	30
#define CALLEE_SAVED_REGS_COUNT	24   /* r30 to r52, plus an empty to align */
#ifndef __ASSEMBLY__
struct task_struct;
#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
extern struct task_struct *_switch_to(struct task_struct *prev,
				      struct task_struct *next);

/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next,
				       unsigned long new_system_save_1_0);
165 | |||
/* Address that switched-away from tasks are at. */
extern unsigned long get_switch_to_pc(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/* No extra stack alignment is applied on tile. */
#define arch_align_stack(x) (x)

/*
 * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel
 * intervention occurs and SIGBUS is delivered with no data address
 * info.  If 0, the kernel single-steps the instruction to discover
 * the data address to provide with the SIGBUS.  If 1, the kernel does
 * a fixup.
 */
extern int unaligned_fixup;

/* Is the kernel printing on each unaligned fixup? */
extern int unaligned_printk;

/* Number of unaligned fixups performed */
extern unsigned int unaligned_fixup_count;

/* Init-time routine to do tile-specific per-cpu setup. */
void setup_cpu(int boot);

/* User-level DMA management functions */
void grant_dma_mpls(void);
void restrict_dma_mpls(void);

#ifdef CONFIG_HARDWALL
/* User-level network management functions */
void reset_network_state(void);
void grant_network_mpls(void);
void restrict_network_mpls(void);
int hardwall_deactivate(struct task_struct *task);

/*
 * Hook hardwall code into changes in affinity: a task bound to a
 * hardwall loses it if its allowed-cpus mask is about to change.
 */
#define arch_set_cpus_allowed(p, new_mask) do { \
	if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
		hardwall_deactivate(p); \
} while (0)
#endif

/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
extern int _sim_syscall(int syscall_num, ...);
#define sim_syscall(syscall_num, ...) \
	_sim_syscall(SIM_CONTROL_SYSCALL + \
		((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
		## __VA_ARGS__)

/*
 * Kernel threads can check to see if they need to migrate their
 * stack whenever they return from a context switch; for user
 * threads, we defer until they are returning to user-space.
 * Also notifies the simulator of task exit/switch via SIM_CONTROL.
 */
#define finish_arch_switch(prev) do {                                     \
	if (unlikely((prev)->state == TASK_DEAD))                         \
		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
	if (current->mm == NULL && !kstack_hash &&                        \
	    current_thread_info()->homecache_cpu != smp_processor_id())   \
		homecache_migrate_kthread();                              \
} while (0)

/* Support function for forking a new task. */
void ret_from_fork(void);

/* Called from ret_from_fork() when a new process starts up. */
struct task_struct *sim_notify_fork(struct task_struct *prev);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_SYSTEM_H */
diff --git a/arch/tile/include/asm/termbits.h b/arch/tile/include/asm/termbits.h new file mode 100644 index 000000000000..3935b106de79 --- /dev/null +++ b/arch/tile/include/asm/termbits.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/termbits.h> | |||
diff --git a/arch/tile/include/asm/termios.h b/arch/tile/include/asm/termios.h new file mode 100644 index 000000000000..280d78a9d966 --- /dev/null +++ b/arch/tile/include/asm/termios.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/termios.h> | |||
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h new file mode 100644 index 000000000000..3872f2b345d2 --- /dev/null +++ b/arch/tile/include/asm/thread_info.h | |||
@@ -0,0 +1,166 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
#ifndef _ASM_TILE_THREAD_INFO_H
#define _ASM_TILE_THREAD_INFO_H

#include <asm/processor.h>
#include <asm/page.h>
#ifndef __ASSEMBLY__

/*
 * Low level task data that assembly code needs immediate access to.
 * The structure is placed at the bottom of the supervisor stack.
 */
struct thread_info {
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	unsigned long		flags;		/* low level flags */
	unsigned long		status;		/* thread-synchronous flags */
	__u32			homecache_cpu;	/* CPU we are homecached on */
	__u32			cpu;		/* current CPU */
	int			preempt_count;	/* 0 => preemptable,
						   <0 => BUG */

	mm_segment_t		addr_limit;	/* thread address space
						   (KERNEL_DS or USER_DS) */
	struct restart_block	restart_block;
	struct single_step_state *step_state;	/* single step state
						   (if non-zero) */
};

/*
 * macros/functions for gaining access to the thread information structure.
 */
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.addr_limit	= KERNEL_DS,		\
	.restart_block	= {			\
		.fn = do_no_restart_syscall,	\
	},					\
	.step_state	= NULL,			\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

#endif /* !__ASSEMBLY__ */
65 | |||
/* Kernel stacks are at least 8KB (two pages if PAGE_SIZE is 4KB). */
#if PAGE_SIZE < 8192
#define THREAD_SIZE_ORDER (13 - PAGE_SHIFT)
#else
#define THREAD_SIZE_ORDER (0)
#endif

#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER)

#define STACK_WARN             (THREAD_SIZE/8)

#ifndef __ASSEMBLY__

/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");

/* thread_info sits at the base of the THREAD_SIZE-aligned kernel stack. */
#define current_thread_info() \
  ((struct thread_info *)(stack_pointer & -THREAD_SIZE))

#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
extern struct thread_info *alloc_thread_info(struct task_struct *task);
extern void free_thread_info(struct thread_info *info);

/* Sit on a nap instruction until interrupted. */
extern void smp_nap(void);

/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
extern void _cpu_idle(void);

/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
				  unsigned long new_sp,
				  unsigned long new_ss10);

#else /* __ASSEMBLY__ */

/* how to get the thread information struct from ASM (mask sp) */
#ifdef __tilegx__
#define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63
#else
#define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31
#endif

#endif /* !__ASSEMBLY__ */
110 | |||
#define PREEMPT_ACTIVE		0x10000000

/*
 * Thread information flags that various assembly files may need to access.
 * Keep flags accessed frequently in low bits, particular since it makes
 * it easier to build constants in assembly.
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_SINGLESTEP		2	/* restore singlestep on return to
					   user mode */
#define TIF_ASYNC_TLB		3	/* got an async TLB fault in kernel */
#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SECCOMP		6	/* secure computing */
#define TIF_MEMDIE		7	/* OOM killer at work */

#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
#define _TIF_ASYNC_TLB		(1<<TIF_ASYNC_TLB)
#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
#define _TIF_MEMDIE		(1<<TIF_MEMDIE)

/* Work to do on any return to user space. */
#define _TIF_ALLWORK_MASK \
  (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|_TIF_ASYNC_TLB)

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#ifdef __tilegx__
#define TS_COMPAT		0x0001	/* 32-bit compatibility mode */
#endif
#define TS_POLLING		0x0004	/* in idle loop but not sleeping */
#define TS_RESTORE_SIGMASK	0x0008	/* restore signal mask in do_signal */

#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)

#ifndef __ASSEMBLY__
#define HAVE_SET_RESTORE_SIGMASK	1
/*
 * Request that do_signal() restore the saved sigmask; also raises
 * TIF_SIGPENDING so the signal path actually runs on return to user.
 */
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
	set_bit(TIF_SIGPENDING, &ti->flags);
}
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_TILE_THREAD_INFO_H */
diff --git a/arch/tile/include/asm/timex.h b/arch/tile/include/asm/timex.h new file mode 100644 index 000000000000..3baf5fc4c0a1 --- /dev/null +++ b/arch/tile/include/asm/timex.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
#ifndef _ASM_TILE_TIMEX_H
#define _ASM_TILE_TIMEX_H

/*
 * This rate should be a multiple of the possible HZ values (100, 250, 1000)
 * and a fraction of the possible hardware timer frequencies.  Our timer
 * frequency is highly tunable but also quite precise, so for the primary use
 * of this value (setting ACT_HZ from HZ) we just pick a value that causes
 * ACT_HZ to be set to HZ.  We make the value somewhat large just to be
 * more robust in case someone tries out a new value of HZ.
 */
#define CLOCK_TICK_RATE	1000000

typedef unsigned long long cycles_t;

/*
 * NOTE(review): CHIP_HAS_SPLIT_CYCLE() and SPR_CYCLE come from
 * <arch/chip.h>/<arch/spr_def.h>, which are not included here —
 * presumably provided by includers; confirm.
 */
#if CHIP_HAS_SPLIT_CYCLE()
cycles_t get_cycles(void);
#else
static inline cycles_t get_cycles(void)
{
	return __insn_mfspr(SPR_CYCLE);
}
#endif

cycles_t get_clock_rate(void);

/* Called at cpu initialization to set some low-level constants. */
void setup_clock(void);

/* Called at cpu initialization to start the tile-timer clock device. */
void setup_tile_timer(void);

#endif /* _ASM_TILE_TIMEX_H */
diff --git a/arch/tile/include/asm/tlb.h b/arch/tile/include/asm/tlb.h new file mode 100644 index 000000000000..4a891a1a8df3 --- /dev/null +++ b/arch/tile/include/asm/tlb.h | |||
@@ -0,0 +1,25 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_TLB_H
#define _ASM_TILE_TLB_H

/*
 * No per-vma or per-PTE work is needed while unmapping on tile; the
 * generic mmu_gather hooks below are therefore no-ops, and a gather
 * flush is implemented by flushing the whole mm via flush_tlb_mm().
 */
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)

#include <asm-generic/tlb.h>

#endif /* _ASM_TILE_TLB_H */
diff --git a/arch/tile/include/asm/tlbflush.h b/arch/tile/include/asm/tlbflush.h new file mode 100644 index 000000000000..96199d214fb8 --- /dev/null +++ b/arch/tile/include/asm/tlbflush.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TLBFLUSH_H | ||
16 | #define _ASM_TILE_TLBFLUSH_H | ||
17 | |||
18 | #include <linux/mm.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <asm/cacheflush.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <hv/hypervisor.h> | ||
24 | |||
25 | /* | ||
26 | * Rather than associating each mm with its own ASID, we just use | ||
27 | * ASIDs to allow us to lazily flush the TLB when we switch mms. | ||
28 | * This way we only have to do an actual TLB flush on mm switch | ||
29 | * every time we wrap ASIDs, not every single time we switch. | ||
30 | * | ||
31 | * FIXME: We might improve performance by keeping ASIDs around | ||
32 | * properly, though since the hypervisor direct-maps VAs to TSB | ||
33 | * entries, we're likely to have lost at least the executable page | ||
34 | * mappings by the time we switch back to the original mm. | ||
35 | */ | ||
36 | DECLARE_PER_CPU(int, current_asid); | ||
37 | |||
38 | /* The hypervisor tells us what ASIDs are available to us. */ | ||
39 | extern int min_asid, max_asid; | ||
40 | |||
41 | static inline unsigned long hv_page_size(const struct vm_area_struct *vma) | ||
42 | { | ||
43 | return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE; | ||
44 | } | ||
45 | |||
46 | /* Pass as vma pointer for non-executable mapping, if no vma available. */ | ||
47 | #define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL) | ||
48 | |||
/* Flush a single user page on this cpu. */
static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
					unsigned long addr,
					unsigned long page_size)
{
	int rc = hv_flush_page(addr, page_size);
	/* A failed hypervisor flush is unrecoverable. */
	if (rc < 0)
		panic("hv_flush_page(%#lx,%#lx) failed: %d",
		      addr, page_size, rc);
	/*
	 * Also flush the icache unless the caller marked the mapping as
	 * non-executable: either via FLUSH_NONEXEC or a vma without
	 * VM_EXEC.  A NULL vma conservatively flushes.
	 */
	if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
		__flush_icache();
}
61 | |||
/* Flush range of user pages on this cpu. */
static inline void local_flush_tlb_pages(const struct vm_area_struct *vma,
					 unsigned long addr,
					 unsigned long page_size,
					 unsigned long len)
{
	int rc = hv_flush_pages(addr, page_size, len);
	/* A failed hypervisor flush is unrecoverable. */
	if (rc < 0)
		panic("hv_flush_pages(%#lx,%#lx,%#lx) failed: %d",
		      addr, page_size, len, rc);
	/* Same icache rule as local_flush_tlb_page() above. */
	if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
		__flush_icache();
}
75 | |||
/* Flush all user pages on this cpu. */
static inline void local_flush_tlb(void)
{
	int rc = hv_flush_all(1);	/* preserve global mappings */
	if (rc < 0)
		panic("hv_flush_all(1) failed: %d", rc);
	/* No vma to consult here, so flush the icache unconditionally. */
	__flush_icache();
}
84 | |||
85 | /* | ||
86 | * Global pages have to be flushed a bit differently. Not a real | ||
87 | * performance problem because this does not happen often. | ||
88 | */ | ||
89 | static inline void local_flush_tlb_all(void) | ||
90 | { | ||
91 | int i; | ||
92 | for (i = 0; ; ++i) { | ||
93 | HV_VirtAddrRange r = hv_inquire_virtual(i); | ||
94 | if (r.size == 0) | ||
95 | break; | ||
96 | local_flush_tlb_pages(NULL, r.start, PAGE_SIZE, r.size); | ||
97 | local_flush_tlb_pages(NULL, r.start, HPAGE_SIZE, r.size); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * TLB flushing: | ||
103 | * | ||
104 | * - flush_tlb() flushes the current mm struct TLBs | ||
105 | * - flush_tlb_all() flushes all processes TLBs | ||
106 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | ||
107 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
108 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
109 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
110 | * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus | ||
111 | * | ||
112 | * Here (as in vm_area_struct), "end" means the first byte after | ||
113 | * our end address. | ||
114 | */ | ||
115 | |||
116 | extern void flush_tlb_all(void); | ||
117 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | ||
118 | extern void flush_tlb_current_task(void); | ||
119 | extern void flush_tlb_mm(struct mm_struct *); | ||
120 | extern void flush_tlb_page(const struct vm_area_struct *, unsigned long); | ||
121 | extern void flush_tlb_page_mm(const struct vm_area_struct *, | ||
122 | struct mm_struct *, unsigned long); | ||
123 | extern void flush_tlb_range(const struct vm_area_struct *, | ||
124 | unsigned long start, unsigned long end); | ||
125 | |||
126 | #define flush_tlb() flush_tlb_current_task() | ||
127 | |||
128 | #endif /* _ASM_TILE_TLBFLUSH_H */ | ||
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h new file mode 100644 index 000000000000..343172d422a9 --- /dev/null +++ b/arch/tile/include/asm/topology.h | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
#ifndef _ASM_TILE_TOPOLOGY_H
#define _ASM_TILE_TOPOLOGY_H

#ifdef CONFIG_NUMA

#include <linux/cpumask.h>

/* Mappings between logical cpu number and node number. */
extern struct cpumask node_2_cpu_mask[];
extern char cpu_2_node[];

/* Returns the number of the node containing CPU 'cpu'. */
static inline int cpu_to_node(int cpu)
{
	return cpu_2_node[cpu];
}

/*
 * Returns the number of the node containing Node 'node'.
 * This architecture is flat, so it is a pretty simple function!
 */
#define parent_node(node) (node)

/* Returns a bitmask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
	return &node_2_cpu_mask[node];
}

/* For now, use numa node -1 for global allocation. */
#define pcibus_to_node(bus) ((void)(bus), -1)

/* sched_domains SD_NODE_INIT for TILE architecture */
#define SD_NODE_INIT (struct sched_domain) {		\
	.min_interval		= 8,			\
	.max_interval		= 32,			\
	.busy_factor		= 32,			\
	.imbalance_pct		= 125,			\
	.cache_nice_tries	= 1,			\
	.busy_idx		= 3,			\
	.idle_idx		= 1,			\
	.newidle_idx		= 2,			\
	.wake_idx		= 1,			\
	.flags			= SD_LOAD_BALANCE	\
				| SD_BALANCE_NEWIDLE	\
				| SD_BALANCE_EXEC	\
				| SD_BALANCE_FORK	\
				| SD_WAKE_AFFINE	\
				| SD_SERIALIZE,		\
	.last_balance		= jiffies,		\
	.balance_interval	= 1,			\
}

/* By definition, we create nodes based on online memory. */
#define node_has_online_mem(nid) 1

#endif /* CONFIG_NUMA */

#include <asm-generic/topology.h>

#ifdef CONFIG_SMP
/*
 * Every cpu is its own core (thread mask is just itself) and all
 * cores report physical package 0.
 */
#define topology_physical_package_id(cpu)	((void)(cpu), 0)
#define topology_core_id(cpu)			(cpu)
#define topology_core_cpumask(cpu)		((void)(cpu), cpu_online_mask)
#define topology_thread_cpumask(cpu)		cpumask_of(cpu)

/* indicates that pointers to the topology struct cpumask maps are valid */
#define arch_provides_topology_pointers		yes
#endif

#endif /* _ASM_TILE_TOPOLOGY_H */
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h new file mode 100644 index 000000000000..432a9c15c8a2 --- /dev/null +++ b/arch/tile/include/asm/traps.h | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
#ifndef _ASM_TILE_TRAPS_H
#define _ASM_TILE_TRAPS_H

/* mm/fault.c */
void do_page_fault(struct pt_regs *, int fault_num,
		   unsigned long address, unsigned long write);
void do_async_page_fault(struct pt_regs *);

#ifndef __tilegx__
/*
 * We return this structure in registers to avoid having to write
 * additional save/restore code in the intvec.S caller.
 */
struct intvec_state {
	void *handler;
	unsigned long vecnum;
	unsigned long fault_num;
	unsigned long info;
	unsigned long retval;
};
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info);
#endif

/* kernel/traps.c */
void do_trap(struct pt_regs *, int fault_num, unsigned long reason);
void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);

/* kernel/time.c */
void do_timer_interrupt(struct pt_regs *, int fault_num);

/* kernel/messaging.c */
void hv_message_intr(struct pt_regs *, int intnum);

/* kernel/irq.c */
void tile_dev_intr(struct pt_regs *, int intnum);

#ifdef CONFIG_HARDWALL
/* kernel/hardwall.c */
void do_hardwall_trap(struct pt_regs *, int fault_num);
#endif

/* kernel/ptrace.c */
void do_breakpoint(struct pt_regs *, int fault_num);


#endif /* _ASM_TILE_TRAPS_H */
diff --git a/arch/tile/include/asm/types.h b/arch/tile/include/asm/types.h new file mode 100644 index 000000000000..b9e79bc580dd --- /dev/null +++ b/arch/tile/include/asm/types.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/types.h> | |||
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h new file mode 100644 index 000000000000..ed17a80ec0ed --- /dev/null +++ b/arch/tile/include/asm/uaccess.h | |||
@@ -0,0 +1,580 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_UACCESS_H | ||
16 | #define _ASM_TILE_UACCESS_H | ||
17 | |||
18 | /* | ||
19 | * User space memory access functions | ||
20 | */ | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <asm-generic/uaccess-unaligned.h> | ||
24 | #include <asm/processor.h> | ||
25 | #include <asm/page.h> | ||
26 | |||
#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

/* KERNEL_DS uses an all-ones limit; USER_DS stops at PAGE_OFFSET. */
#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/* Two segments are equal iff their limit values match. */
#define segment_eq(a, b) ((a).seg == (b).seg)
47 | |||
48 | #ifndef __tilegx__ | ||
49 | /* | ||
50 | * We could allow mapping all 16 MB at 0xfc000000, but we set up a | ||
51 | * special hack in arch_setup_additional_pages() to auto-create a mapping | ||
52 | * for the first 16 KB, and it would seem strange to have different | ||
53 | * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000. | ||
54 | */ | ||
55 | static inline int is_arch_mappable_range(unsigned long addr, | ||
56 | unsigned long size) | ||
57 | { | ||
58 | return (addr >= MEM_USER_INTRPT && | ||
59 | addr < (MEM_USER_INTRPT + INTRPT_SIZE) && | ||
60 | size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr); | ||
61 | } | ||
62 | #define is_arch_mappable_range is_arch_mappable_range | ||
63 | #else | ||
64 | #define is_arch_mappable_range(addr, size) 0 | ||
65 | #endif | ||
66 | |||
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({ \
	__chk_user_ptr(addr); \
	/* __range_ok() returns 0 on success; valid is the common case. */ \
	likely(__range_ok((unsigned long)(addr), (size)) == 0); \
})
96 | |||
97 | /* | ||
98 | * The exception table consists of pairs of addresses: the first is the | ||
99 | * address of an instruction that is allowed to fault, and the second is | ||
100 | * the address at which the program should continue. No registers are | ||
101 | * modified, so it is entirely up to the continuation code to figure out | ||
102 | * what to do. | ||
103 | * | ||
104 | * All the routines below use bits of fixup code that are out of line | ||
105 | * with the main instruction path. This means when everything is well, | ||
106 | * we don't even have to jump over them. Further, they do not intrude | ||
107 | * on our cache or tlb entries. | ||
108 | */ | ||
109 | |||
110 | struct exception_table_entry { | ||
111 | unsigned long insn, fixup; | ||
112 | }; | ||
113 | |||
114 | extern int fixup_exception(struct pt_regs *regs); | ||
115 | |||
116 | /* | ||
117 | * We return the __get_user_N function results in a structure, | ||
118 | * thus in r0 and r1. If "err" is zero, "val" is the result | ||
119 | * of the read; otherwise, "err" is -EFAULT. | ||
120 | * | ||
121 | * We rarely need 8-byte values on a 32-bit architecture, but | ||
122 | * we size the structure to accommodate. In practice, for the | ||
123 | * the smaller reads, we can zero the high word for free, and | ||
124 | * the caller will ignore it by virtue of casting anyway. | ||
125 | */ | ||
126 | struct __get_user { | ||
127 | unsigned long long val; | ||
128 | int err; | ||
129 | }; | ||
130 | |||
131 | /* | ||
132 | * FIXME: we should express these as inline extended assembler, since | ||
133 | * they're fundamentally just a variable dereference and some | ||
134 | * supporting exception_table gunk. Note that (a la i386) we can | ||
135 | * extend the copy_to_user and copy_from_user routines to call into | ||
136 | * such extended assembler routines, though we will have to use a | ||
137 | * different return code in that case (1, 2, or 4, rather than -EFAULT). | ||
138 | */ | ||
139 | extern struct __get_user __get_user_1(const void __user *); | ||
140 | extern struct __get_user __get_user_2(const void __user *); | ||
141 | extern struct __get_user __get_user_4(const void __user *); | ||
142 | extern struct __get_user __get_user_8(const void __user *); | ||
143 | extern int __put_user_1(long, void __user *); | ||
144 | extern int __put_user_2(long, void __user *); | ||
145 | extern int __put_user_4(long, void __user *); | ||
146 | extern int __put_user_8(long long, void __user *); | ||
147 | |||
148 | /* Unimplemented routines to cause linker failures */ | ||
149 | extern struct __get_user __get_user_bad(void); | ||
150 | extern int __put_user_bad(void); | ||
151 | |||
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 */
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)						\
({	struct __get_user __ret;					\
	__typeof__(*(ptr)) const __user *__gu_addr = (ptr);		\
	__chk_user_ptr(__gu_addr);					\
	/* Dispatch to the out-of-line helper matching the object size. */ \
	switch (sizeof(*(__gu_addr))) {					\
	case 1:								\
		__ret = __get_user_1(__gu_addr);			\
		break;							\
	case 2:								\
		__ret = __get_user_2(__gu_addr);			\
		break;							\
	case 4:								\
		__ret = __get_user_4(__gu_addr);			\
		break;							\
	case 8:								\
		__ret = __get_user_8(__gu_addr);			\
		break;							\
	default:							\
		__ret = __get_user_bad();				\
		break;							\
	}								\
	/* The inner "v - v" cast yields an integer type of the same	\
	 * width even for pointer types, avoiding questionable casts	\
	 * (see the implementation note on __put_user below).		\
	 */								\
	(x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \
		__ret.val;						\
	__ret.err;							\
})
201 | |||
/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 *
 * Implementation note: The "case 8" logic of casting to the type of
 * the result of subtracting the value from itself is basically a way
 * of keeping all integer types the same, but casting any pointers to
 * ptrdiff_t, i.e. also an integer type.  This way there are no
 * questionable casts seen by the compiler on an ILP32 platform.
 */
#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	typeof(*__pu_addr) __pu_val = (x);				\
	__chk_user_ptr(__pu_addr);					\
	/* Dispatch on object size; helpers take the value as a long. */ \
	switch (sizeof(__pu_val)) {					\
	case 1:								\
		__pu_err = __put_user_1((long)__pu_val, __pu_addr);	\
		break;							\
	case 2:								\
		__pu_err = __put_user_2((long)__pu_val, __pu_addr);	\
		break;							\
	case 4:								\
		__pu_err = __put_user_4((long)__pu_val, __pu_addr);	\
		break;							\
	case 8:								\
		__pu_err =						\
		  __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\
			__pu_addr);					\
		break;							\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})
254 | |||
/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__Pu_addr = (ptr);			\
	access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?	\
		__put_user((x), (__Pu_addr)) :				\
		-EFAULT;						\
})

/* On a bad address, get_user() zeroes @x in addition to returning -EFAULT. */
#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) const __user *__Gu_addr = (ptr);		\
	access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?	\
		__get_user((x), (__Gu_addr)) :				\
		((x) = 0, -EFAULT);					\
})
275 | |||
/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
	void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();	/* may sleep taking a fault on @to */
	return __copy_to_user_inatomic(to, from, n);
}
303 | |||
304 | static inline unsigned long __must_check | ||
305 | copy_to_user(void __user *to, const void *from, unsigned long n) | ||
306 | { | ||
307 | if (access_ok(VERIFY_WRITE, to, n)) | ||
308 | n = __copy_to_user(to, from, n); | ||
309 | return n; | ||
310 | } | ||
311 | |||
/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().  This version
 * does *NOT* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
	void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
	void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	/* The _zeroing variant pads @to with zeros if the copy faults. */
	return __copy_from_user_zeroing(to, from, n);
}
345 | |||
346 | static inline unsigned long __must_check | ||
347 | _copy_from_user(void *to, const void __user *from, unsigned long n) | ||
348 | { | ||
349 | if (access_ok(VERIFY_READ, from, n)) | ||
350 | n = __copy_from_user(to, from, n); | ||
351 | else | ||
352 | memset(to, 0, n); | ||
353 | return n; | ||
354 | } | ||
355 | |||
#ifdef CONFIG_DEBUG_COPY_FROM_USER
extern void copy_from_user_overflow(void)
	__compiletime_warning("copy_from_user() size is not provably correct");

static inline unsigned long __must_check copy_from_user(void *to,
					const void __user *from,
					unsigned long n)
{
	/* Compile-time size of the destination object, or -1 if unknown. */
	int sz = __compiletime_object_size(to);

	/* Only copy when @to is provably large enough (or size unknown). */
	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
	else
		copy_from_user_overflow();

	return n;
}
#else
#define copy_from_user _copy_from_user
#endif
376 | |||
377 | #ifdef __tilegx__ | ||
378 | /** | ||
379 | * __copy_in_user() - copy data within user space, with less checking. | ||
380 | * @to: Destination address, in user space. | ||
381 | * @from: Source address, in kernel space. | ||
382 | * @n: Number of bytes to copy. | ||
383 | * | ||
384 | * Context: User context only. This function may sleep. | ||
385 | * | ||
386 | * Copy data from user space to user space. Caller must check | ||
387 | * the specified blocks with access_ok() before calling this function. | ||
388 | * | ||
389 | * Returns number of bytes that could not be copied. | ||
390 | * On success, this will be zero. | ||
391 | */ | ||
392 | extern unsigned long __copy_in_user_asm( | ||
393 | void __user *to, const void __user *from, unsigned long n); | ||
394 | |||
395 | static inline unsigned long __must_check | ||
396 | __copy_in_user(void __user *to, const void __user *from, unsigned long n) | ||
397 | { | ||
398 | might_sleep(); | ||
399 | return __copy_in_user_asm(to, from, n); | ||
400 | } | ||
401 | |||
static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	/*
	 * Both ranges must be valid user addresses; otherwise return
	 * the full length as uncopied.
	 */
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = __copy_in_user(to, from, n);
	return n;
}
#endif
410 | |||
411 | |||
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
extern long strnlen_user_asm(const char __user *str, long n);
static inline long __must_check strnlen_user(const char __user *str, long n)
{
	might_fault();
	return strnlen_user_asm(str, n);
}
/* strlen_user() is strnlen_user() with no practical length limit. */
#define strlen_user(str) strnlen_user(str, LONG_MAX)
433 | |||
/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
static inline long __must_check __strncpy_from_user(
	char *dst, const char __user *src, long count)
{
	might_fault();	/* may sleep taking a fault on @src */
	return strncpy_from_user_asm(dst, src, count);
}
461 | static inline long __must_check strncpy_from_user( | ||
462 | char *dst, const char __user *src, long count) | ||
463 | { | ||
464 | if (access_ok(VERIFY_READ, src, 1)) | ||
465 | return __strncpy_from_user(dst, src, count); | ||
466 | return -EFAULT; | ||
467 | } | ||
468 | |||
469 | /** | ||
470 | * clear_user: - Zero a block of memory in user space. | ||
471 | * @mem: Destination address, in user space. | ||
472 | * @len: Number of bytes to zero. | ||
473 | * | ||
474 | * Zero a block of memory in user space. | ||
475 | * | ||
476 | * Returns number of bytes that could not be cleared. | ||
477 | * On success, this will be zero. | ||
478 | */ | ||
479 | extern unsigned long clear_user_asm(void __user *mem, unsigned long len); | ||
480 | static inline unsigned long __must_check __clear_user( | ||
481 | void __user *mem, unsigned long len) | ||
482 | { | ||
483 | might_fault(); | ||
484 | return clear_user_asm(mem, len); | ||
485 | } | ||
486 | static inline unsigned long __must_check clear_user( | ||
487 | void __user *mem, unsigned long len) | ||
488 | { | ||
489 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
490 | return __clear_user(mem, len); | ||
491 | return len; | ||
492 | } | ||
493 | |||
494 | /** | ||
495 | * flush_user: - Flush a block of memory in user space from cache. | ||
496 | * @mem: Destination address, in user space. | ||
497 | * @len: Number of bytes to flush. | ||
498 | * | ||
499 | * Returns number of bytes that could not be flushed. | ||
500 | * On success, this will be zero. | ||
501 | */ | ||
502 | extern unsigned long flush_user_asm(void __user *mem, unsigned long len); | ||
503 | static inline unsigned long __must_check __flush_user( | ||
504 | void __user *mem, unsigned long len) | ||
505 | { | ||
506 | int retval; | ||
507 | |||
508 | might_fault(); | ||
509 | retval = flush_user_asm(mem, len); | ||
510 | mb_incoherent(); | ||
511 | return retval; | ||
512 | } | ||
513 | |||
514 | static inline unsigned long __must_check flush_user( | ||
515 | void __user *mem, unsigned long len) | ||
516 | { | ||
517 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
518 | return __flush_user(mem, len); | ||
519 | return len; | ||
520 | } | ||
521 | |||
522 | /** | ||
523 | * inv_user: - Invalidate a block of memory in user space from cache. | ||
524 | * @mem: Destination address, in user space. | ||
525 | * @len: Number of bytes to invalidate. | ||
526 | * | ||
527 | * Returns number of bytes that could not be invalidated. | ||
528 | * On success, this will be zero. | ||
529 | * | ||
530 | * Note that on Tile64, the "inv" operation is in fact a | ||
531 | * "flush and invalidate", so cache write-backs will occur prior | ||
532 | * to the cache being marked invalid. | ||
533 | */ | ||
534 | extern unsigned long inv_user_asm(void __user *mem, unsigned long len); | ||
535 | static inline unsigned long __must_check __inv_user( | ||
536 | void __user *mem, unsigned long len) | ||
537 | { | ||
538 | int retval; | ||
539 | |||
540 | might_fault(); | ||
541 | retval = inv_user_asm(mem, len); | ||
542 | mb_incoherent(); | ||
543 | return retval; | ||
544 | } | ||
545 | static inline unsigned long __must_check inv_user( | ||
546 | void __user *mem, unsigned long len) | ||
547 | { | ||
548 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
549 | return __inv_user(mem, len); | ||
550 | return len; | ||
551 | } | ||
552 | |||
553 | /** | ||
554 | * finv_user: - Flush-inval a block of memory in user space from cache. | ||
555 | * @mem: Destination address, in user space. | ||
556 | * @len: Number of bytes to invalidate. | ||
557 | * | ||
558 | * Returns number of bytes that could not be flush-invalidated. | ||
559 | * On success, this will be zero. | ||
560 | */ | ||
561 | extern unsigned long finv_user_asm(void __user *mem, unsigned long len); | ||
562 | static inline unsigned long __must_check __finv_user( | ||
563 | void __user *mem, unsigned long len) | ||
564 | { | ||
565 | int retval; | ||
566 | |||
567 | might_fault(); | ||
568 | retval = finv_user_asm(mem, len); | ||
569 | mb_incoherent(); | ||
570 | return retval; | ||
571 | } | ||
572 | static inline unsigned long __must_check finv_user( | ||
573 | void __user *mem, unsigned long len) | ||
574 | { | ||
575 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
576 | return __finv_user(mem, len); | ||
577 | return len; | ||
578 | } | ||
579 | |||
580 | #endif /* _ASM_TILE_UACCESS_H */ | ||
diff --git a/arch/tile/include/asm/ucontext.h b/arch/tile/include/asm/ucontext.h new file mode 100644 index 000000000000..9bc07b9f30fb --- /dev/null +++ b/arch/tile/include/asm/ucontext.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ucontext.h> | |||
diff --git a/arch/tile/include/asm/unaligned.h b/arch/tile/include/asm/unaligned.h new file mode 100644 index 000000000000..137e2de5b102 --- /dev/null +++ b/arch/tile/include/asm/unaligned.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_UNALIGNED_H | ||
16 | #define _ASM_TILE_UNALIGNED_H | ||
17 | |||
18 | #include <linux/unaligned/le_struct.h> | ||
19 | #include <linux/unaligned/be_byteshift.h> | ||
20 | #include <linux/unaligned/generic.h> | ||
21 | #define get_unaligned __get_unaligned_le | ||
22 | #define put_unaligned __put_unaligned_le | ||
23 | |||
24 | #endif /* _ASM_TILE_UNALIGNED_H */ | ||
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h new file mode 100644 index 000000000000..f2e3ff485333 --- /dev/null +++ b/arch/tile/include/asm/unistd.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL) | ||
16 | #define _ASM_TILE_UNISTD_H | ||
17 | |||
18 | #ifndef __LP64__ | ||
19 | /* Use the flavor of this syscall that matches the 32-bit API better. */ | ||
20 | #define __ARCH_WANT_SYNC_FILE_RANGE2 | ||
21 | #endif | ||
22 | |||
23 | /* Use the standard ABI for syscalls. */ | ||
24 | #include <asm-generic/unistd.h> | ||
25 | |||
26 | /* Additional Tilera-specific syscalls. */ | ||
27 | #define __NR_flush_cache (__NR_arch_specific_syscall + 1) | ||
28 | __SYSCALL(__NR_flush_cache, sys_flush_cache) | ||
29 | |||
30 | #ifndef __tilegx__ | ||
31 | /* "Fast" syscalls provide atomic support for 32-bit chips. */ | ||
32 | #define __NR_FAST_cmpxchg -1 | ||
33 | #define __NR_FAST_atomic_update -2 | ||
34 | #define __NR_FAST_cmpxchg64 -3 | ||
35 | #define __NR_cmpxchg_badaddr (__NR_arch_specific_syscall + 0) | ||
36 | __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr) | ||
37 | #endif | ||
38 | |||
39 | #ifdef __KERNEL__ | ||
40 | /* In compat mode, we use sys_llseek() for compat_sys_llseek(). */ | ||
41 | #ifdef CONFIG_COMPAT | ||
42 | #define __ARCH_WANT_SYS_LLSEEK | ||
43 | #endif | ||
44 | #endif | ||
45 | |||
46 | #endif /* _ASM_TILE_UNISTD_H */ | ||
diff --git a/arch/tile/include/asm/user.h b/arch/tile/include/asm/user.h new file mode 100644 index 000000000000..cbc8b4d5a5ce --- /dev/null +++ b/arch/tile/include/asm/user.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_USER_H | ||
17 | #define _ASM_TILE_USER_H | ||
18 | |||
19 | /* This header is for a.out file formats, which TILE does not support. */ | ||
20 | |||
21 | #endif /* _ASM_TILE_USER_H */ | ||
diff --git a/arch/tile/include/asm/xor.h b/arch/tile/include/asm/xor.h new file mode 100644 index 000000000000..c82eb12a5b18 --- /dev/null +++ b/arch/tile/include/asm/xor.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/xor.h> | |||
diff --git a/arch/tile/include/hv/drv_pcie_rc_intf.h b/arch/tile/include/hv/drv_pcie_rc_intf.h new file mode 100644 index 000000000000..9bd2243bece0 --- /dev/null +++ b/arch/tile/include/hv/drv_pcie_rc_intf.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file drv_pcie_rc_intf.h | ||
17 | * Interface definitions for the PCIE Root Complex. | ||
18 | */ | ||
19 | |||
20 | #ifndef _SYS_HV_DRV_PCIE_RC_INTF_H | ||
21 | #define _SYS_HV_DRV_PCIE_RC_INTF_H | ||
22 | |||
23 | /** File offset for reading the interrupt base number used for PCIE legacy | ||
24 | interrupts and PLX Gen 1 requirement flag */ | ||
25 | #define PCIE_RC_CONFIG_MASK_OFF 0 | ||
26 | |||
27 | |||
28 | /** | ||
29 | * Structure used for obtaining PCIe config information, read from the PCIE | ||
30 | * subsystem /ctl file at initialization | ||
31 | */ | ||
32 | typedef struct pcie_rc_config | ||
33 | { | ||
34 | int intr; /**< interrupt number used for downcall */ | ||
35 | int plx_gen1; /**< flag for PLX Gen 1 configuration */ | ||
36 | } pcie_rc_config_t; | ||
37 | |||
38 | #endif /* _SYS_HV_DRV_PCIE_RC_INTF_H */ | ||
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h new file mode 100644 index 000000000000..59b46dc53994 --- /dev/null +++ b/arch/tile/include/hv/hypervisor.h | |||
@@ -0,0 +1,2375 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file hypervisor.h | ||
17 | * The hypervisor's public API. | ||
18 | */ | ||
19 | |||
20 | #ifndef _TILE_HV_H | ||
21 | #define _TILE_HV_H | ||
22 | |||
23 | #include <arch/chip.h> | ||
24 | |||
25 | #include <hv/pagesize.h> | ||
26 | |||
27 | /* Linux builds want unsigned long constants, but assembler wants numbers */ | ||
28 | #ifdef __ASSEMBLER__ | ||
29 | /** One, for assembler */ | ||
30 | #define __HV_SIZE_ONE 1 | ||
31 | #elif !defined(__tile__) && CHIP_VA_WIDTH() > 32 | ||
32 | /** One, for 64-bit on host */ | ||
33 | #define __HV_SIZE_ONE 1ULL | ||
34 | #else | ||
35 | /** One, for Linux */ | ||
36 | #define __HV_SIZE_ONE 1UL | ||
37 | #endif | ||
38 | |||
39 | /** The log2 of the span of a level-1 page table, in bytes. | ||
40 | */ | ||
41 | #define HV_LOG2_L1_SPAN 32 | ||
42 | |||
43 | /** The span of a level-1 page table, in bytes. | ||
44 | */ | ||
45 | #define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN) | ||
46 | |||
47 | /** The size of small pages, in bytes. This value should be verified | ||
48 | * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). | ||
49 | */ | ||
50 | #define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL) | ||
51 | |||
52 | /** The size of large pages, in bytes. This value should be verified | ||
53 | * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). | ||
54 | */ | ||
55 | #define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE) | ||
56 | |||
57 | /** The log2 of the granularity at which page tables must be aligned; | ||
58 | * in other words, the CPA for a page table must have this many zero | ||
59 | * bits at the bottom of the address. | ||
60 | */ | ||
61 | #define HV_LOG2_PAGE_TABLE_ALIGN 11 | ||
62 | |||
63 | /** The granularity at which page tables must be aligned. | ||
64 | */ | ||
65 | #define HV_PAGE_TABLE_ALIGN (__HV_SIZE_ONE << HV_LOG2_PAGE_TABLE_ALIGN) | ||
66 | |||
67 | /** Normal start of hypervisor glue in client physical memory. */ | ||
68 | #define HV_GLUE_START_CPA 0x10000 | ||
69 | |||
70 | /** This much space is reserved at HV_GLUE_START_CPA | ||
71 | * for the hypervisor glue. The client program must start at | ||
72 | * some address higher than this, and in particular the address of | ||
73 | * its text section should be equal to zero modulo HV_PAGE_SIZE_LARGE | ||
74 | * so that relative offsets to the HV glue are correct. | ||
75 | */ | ||
76 | #define HV_GLUE_RESERVED_SIZE 0x10000 | ||
77 | |||
78 | /** Each entry in the hv dispatch array takes this many bytes. */ | ||
79 | #define HV_DISPATCH_ENTRY_SIZE 32 | ||
80 | |||
81 | /** Version of the hypervisor interface defined by this file */ | ||
82 | #define _HV_VERSION 11 | ||
83 | |||
84 | /* Index into hypervisor interface dispatch code blocks. | ||
85 | * | ||
86 | * Hypervisor calls are invoked from user space by calling code | ||
87 | * at an address HV_BASE_ADDRESS + (index) * HV_DISPATCH_ENTRY_SIZE, | ||
88 | * where index is one of these enum values. | ||
89 | * | ||
90 | * Normally a supervisor is expected to produce a set of symbols | ||
91 | * starting at HV_BASE_ADDRESS that obey this convention, but a user | ||
92 | * program could call directly through function pointers if desired. | ||
93 | * | ||
94 | * These numbers are part of the binary API and will not be changed | ||
95 | * without updating HV_VERSION, which should be a rare event. | ||
96 | */ | ||
97 | |||
98 | /** reserved. */ | ||
99 | #define _HV_DISPATCH_RESERVED 0 | ||
100 | |||
101 | /** hv_init */ | ||
102 | #define HV_DISPATCH_INIT 1 | ||
103 | |||
104 | /** hv_install_context */ | ||
105 | #define HV_DISPATCH_INSTALL_CONTEXT 2 | ||
106 | |||
107 | /** hv_sysconf */ | ||
108 | #define HV_DISPATCH_SYSCONF 3 | ||
109 | |||
110 | /** hv_get_rtc */ | ||
111 | #define HV_DISPATCH_GET_RTC 4 | ||
112 | |||
113 | /** hv_set_rtc */ | ||
114 | #define HV_DISPATCH_SET_RTC 5 | ||
115 | |||
116 | /** hv_flush_asid */ | ||
117 | #define HV_DISPATCH_FLUSH_ASID 6 | ||
118 | |||
119 | /** hv_flush_page */ | ||
120 | #define HV_DISPATCH_FLUSH_PAGE 7 | ||
121 | |||
122 | /** hv_flush_pages */ | ||
123 | #define HV_DISPATCH_FLUSH_PAGES 8 | ||
124 | |||
125 | /** hv_restart */ | ||
126 | #define HV_DISPATCH_RESTART 9 | ||
127 | |||
128 | /** hv_halt */ | ||
129 | #define HV_DISPATCH_HALT 10 | ||
130 | |||
131 | /** hv_power_off */ | ||
132 | #define HV_DISPATCH_POWER_OFF 11 | ||
133 | |||
134 | /** hv_inquire_physical */ | ||
135 | #define HV_DISPATCH_INQUIRE_PHYSICAL 12 | ||
136 | |||
137 | /** hv_inquire_memory_controller */ | ||
138 | #define HV_DISPATCH_INQUIRE_MEMORY_CONTROLLER 13 | ||
139 | |||
140 | /** hv_inquire_virtual */ | ||
141 | #define HV_DISPATCH_INQUIRE_VIRTUAL 14 | ||
142 | |||
143 | /** hv_inquire_asid */ | ||
144 | #define HV_DISPATCH_INQUIRE_ASID 15 | ||
145 | |||
146 | /** hv_nanosleep */ | ||
147 | #define HV_DISPATCH_NANOSLEEP 16 | ||
148 | |||
149 | /** hv_console_read_if_ready */ | ||
150 | #define HV_DISPATCH_CONSOLE_READ_IF_READY 17 | ||
151 | |||
152 | /** hv_console_write */ | ||
153 | #define HV_DISPATCH_CONSOLE_WRITE 18 | ||
154 | |||
155 | /** hv_downcall_dispatch */ | ||
156 | #define HV_DISPATCH_DOWNCALL_DISPATCH 19 | ||
157 | |||
158 | /** hv_inquire_topology */ | ||
159 | #define HV_DISPATCH_INQUIRE_TOPOLOGY 20 | ||
160 | |||
161 | /** hv_fs_findfile */ | ||
162 | #define HV_DISPATCH_FS_FINDFILE 21 | ||
163 | |||
164 | /** hv_fs_fstat */ | ||
165 | #define HV_DISPATCH_FS_FSTAT 22 | ||
166 | |||
167 | /** hv_fs_pread */ | ||
168 | #define HV_DISPATCH_FS_PREAD 23 | ||
169 | |||
170 | /** hv_physaddr_read64 */ | ||
171 | #define HV_DISPATCH_PHYSADDR_READ64 24 | ||
172 | |||
173 | /** hv_physaddr_write64 */ | ||
174 | #define HV_DISPATCH_PHYSADDR_WRITE64 25 | ||
175 | |||
176 | /** hv_get_command_line */ | ||
177 | #define HV_DISPATCH_GET_COMMAND_LINE 26 | ||
178 | |||
179 | /** hv_set_caching */ | ||
180 | #define HV_DISPATCH_SET_CACHING 27 | ||
181 | |||
182 | /** hv_bzero_page */ | ||
183 | #define HV_DISPATCH_BZERO_PAGE 28 | ||
184 | |||
185 | /** hv_register_message_state */ | ||
186 | #define HV_DISPATCH_REGISTER_MESSAGE_STATE 29 | ||
187 | |||
188 | /** hv_send_message */ | ||
189 | #define HV_DISPATCH_SEND_MESSAGE 30 | ||
190 | |||
191 | /** hv_receive_message */ | ||
192 | #define HV_DISPATCH_RECEIVE_MESSAGE 31 | ||
193 | |||
194 | /** hv_inquire_context */ | ||
195 | #define HV_DISPATCH_INQUIRE_CONTEXT 32 | ||
196 | |||
197 | /** hv_start_all_tiles */ | ||
198 | #define HV_DISPATCH_START_ALL_TILES 33 | ||
199 | |||
200 | /** hv_dev_open */ | ||
201 | #define HV_DISPATCH_DEV_OPEN 34 | ||
202 | |||
203 | /** hv_dev_close */ | ||
204 | #define HV_DISPATCH_DEV_CLOSE 35 | ||
205 | |||
206 | /** hv_dev_pread */ | ||
207 | #define HV_DISPATCH_DEV_PREAD 36 | ||
208 | |||
209 | /** hv_dev_pwrite */ | ||
210 | #define HV_DISPATCH_DEV_PWRITE 37 | ||
211 | |||
212 | /** hv_dev_poll */ | ||
213 | #define HV_DISPATCH_DEV_POLL 38 | ||
214 | |||
215 | /** hv_dev_poll_cancel */ | ||
216 | #define HV_DISPATCH_DEV_POLL_CANCEL 39 | ||
217 | |||
218 | /** hv_dev_preada */ | ||
219 | #define HV_DISPATCH_DEV_PREADA 40 | ||
220 | |||
221 | /** hv_dev_pwritea */ | ||
222 | #define HV_DISPATCH_DEV_PWRITEA 41 | ||
223 | |||
224 | /** hv_flush_remote */ | ||
225 | #define HV_DISPATCH_FLUSH_REMOTE 42 | ||
226 | |||
227 | /** hv_console_putc */ | ||
228 | #define HV_DISPATCH_CONSOLE_PUTC 43 | ||
229 | |||
230 | /** hv_inquire_tiles */ | ||
231 | #define HV_DISPATCH_INQUIRE_TILES 44 | ||
232 | |||
233 | /** hv_confstr */ | ||
234 | #define HV_DISPATCH_CONFSTR 45 | ||
235 | |||
236 | /** hv_reexec */ | ||
237 | #define HV_DISPATCH_REEXEC 46 | ||
238 | |||
239 | /** hv_set_command_line */ | ||
240 | #define HV_DISPATCH_SET_COMMAND_LINE 47 | ||
241 | |||
242 | #if !CHIP_HAS_IPI() | ||
243 | |||
244 | /** hv_clear_intr */ | ||
245 | #define HV_DISPATCH_CLEAR_INTR 48 | ||
246 | |||
247 | /** hv_enable_intr */ | ||
248 | #define HV_DISPATCH_ENABLE_INTR 49 | ||
249 | |||
250 | /** hv_disable_intr */ | ||
251 | #define HV_DISPATCH_DISABLE_INTR 50 | ||
252 | |||
253 | /** hv_raise_intr */ | ||
254 | #define HV_DISPATCH_RAISE_INTR 51 | ||
255 | |||
256 | /** hv_trigger_ipi */ | ||
257 | #define HV_DISPATCH_TRIGGER_IPI 52 | ||
258 | |||
259 | #endif /* !CHIP_HAS_IPI() */ | ||
260 | |||
261 | /** hv_store_mapping */ | ||
262 | #define HV_DISPATCH_STORE_MAPPING 53 | ||
263 | |||
264 | /** hv_inquire_realpa */ | ||
265 | #define HV_DISPATCH_INQUIRE_REALPA 54 | ||
266 | |||
267 | /** hv_flush_all */ | ||
268 | #define HV_DISPATCH_FLUSH_ALL 55 | ||
269 | |||
270 | #if CHIP_HAS_IPI() | ||
271 | /** hv_get_ipi_pte */ | ||
272 | #define HV_DISPATCH_GET_IPI_PTE 56 | ||
273 | #endif | ||
274 | |||
275 | /** One more than the largest dispatch value */ | ||
276 | #define _HV_DISPATCH_END 57 | ||
277 | |||
278 | |||
279 | #ifndef __ASSEMBLER__ | ||
280 | |||
281 | #ifdef __KERNEL__ | ||
282 | #include <asm/types.h> | ||
283 | typedef u32 __hv32; /**< 32-bit value */ | ||
284 | typedef u64 __hv64; /**< 64-bit value */ | ||
285 | #else | ||
286 | #include <stdint.h> | ||
287 | typedef uint32_t __hv32; /**< 32-bit value */ | ||
288 | typedef uint64_t __hv64; /**< 64-bit value */ | ||
289 | #endif | ||
290 | |||
291 | |||
292 | /** Hypervisor physical address. */ | ||
293 | typedef __hv64 HV_PhysAddr; | ||
294 | |||
295 | #if CHIP_VA_WIDTH() > 32 | ||
296 | /** Hypervisor virtual address. */ | ||
297 | typedef __hv64 HV_VirtAddr; | ||
298 | #else | ||
299 | /** Hypervisor virtual address. */ | ||
300 | typedef __hv32 HV_VirtAddr; | ||
301 | #endif /* CHIP_VA_WIDTH() > 32 */ | ||
302 | |||
303 | /** Hypervisor ASID. */ | ||
304 | typedef unsigned int HV_ASID; | ||
305 | |||
306 | /** Hypervisor tile location for a memory access | ||
307 | * ("location overridden target"). | ||
308 | */ | ||
309 | typedef unsigned int HV_LOTAR; | ||
310 | |||
311 | /** Hypervisor size of a page. */ | ||
312 | typedef unsigned long HV_PageSize; | ||
313 | |||
314 | /** A page table entry. | ||
315 | */ | ||
316 | typedef struct | ||
317 | { | ||
318 | __hv64 val; /**< Value of PTE */ | ||
319 | } HV_PTE; | ||
320 | |||
321 | /** Hypervisor error code. */ | ||
322 | typedef int HV_Errno; | ||
323 | |||
324 | #endif /* !__ASSEMBLER__ */ | ||
325 | |||
326 | #define HV_OK 0 /**< No error */ | ||
327 | #define HV_EINVAL -801 /**< Invalid argument */ | ||
328 | #define HV_ENODEV -802 /**< No such device */ | ||
329 | #define HV_ENOENT -803 /**< No such file or directory */ | ||
330 | #define HV_EBADF -804 /**< Bad file number */ | ||
331 | #define HV_EFAULT -805 /**< Bad address */ | ||
332 | #define HV_ERECIP -806 /**< Bad recipients */ | ||
333 | #define HV_E2BIG -807 /**< Message too big */ | ||
334 | #define HV_ENOTSUP -808 /**< Service not supported */ | ||
335 | #define HV_EBUSY -809 /**< Device busy */ | ||
336 | #define HV_ENOSYS -810 /**< Invalid syscall */ | ||
337 | #define HV_EPERM -811 /**< No permission */ | ||
338 | #define HV_ENOTREADY -812 /**< Device not ready */ | ||
339 | #define HV_EIO -813 /**< I/O error */ | ||
340 | #define HV_ENOMEM -814 /**< Out of memory */ | ||
341 | |||
342 | #define HV_ERR_MAX -801 /**< Largest HV error code */ | ||
343 | #define HV_ERR_MIN -814 /**< Smallest HV error code */ | ||
344 | |||
345 | #ifndef __ASSEMBLER__ | ||
346 | |||
347 | /** Pass HV_VERSION to hv_init to request this version of the interface. */ | ||
348 | typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber; | ||
349 | |||
350 | /** Initializes the hypervisor. | ||
351 | * | ||
352 | * @param interface_version_number The version of the hypervisor interface | ||
353 | * that this program expects, typically HV_VERSION. | ||
354 | * @param chip_num Architecture number of the chip the client was built for. | ||
355 | * @param chip_rev_num Revision number of the chip the client was built for. | ||
356 | */ | ||
357 | void hv_init(HV_VersionNumber interface_version_number, | ||
358 | int chip_num, int chip_rev_num); | ||
359 | |||
360 | |||
361 | /** Queries we can make for hv_sysconf(). | ||
362 | * | ||
363 | * These numbers are part of the binary API and guaranteed not to change. | ||
364 | */ | ||
365 | typedef enum { | ||
366 | /** An invalid value; do not use. */ | ||
367 | _HV_SYSCONF_RESERVED = 0, | ||
368 | |||
369 | /** The length of the glue section containing the hv_ procs, in bytes. */ | ||
370 | HV_SYSCONF_GLUE_SIZE = 1, | ||
371 | |||
372 | /** The size of small pages, in bytes. */ | ||
373 | HV_SYSCONF_PAGE_SIZE_SMALL = 2, | ||
374 | |||
375 | /** The size of large pages, in bytes. */ | ||
376 | HV_SYSCONF_PAGE_SIZE_LARGE = 3, | ||
377 | |||
378 | /** Processor clock speed, in hertz. */ | ||
379 | HV_SYSCONF_CPU_SPEED = 4, | ||
380 | |||
381 | /** Processor temperature, in degrees Kelvin. The value | ||
382 | * HV_SYSCONF_TEMP_KTOC may be subtracted from this to get degrees | ||
383 | * Celsius. If that Celsius value is HV_SYSCONF_OVERTEMP, this indicates | ||
384 | * that the temperature has hit an upper limit and is no longer being | ||
385 | * accurately tracked. | ||
386 | */ | ||
387 | HV_SYSCONF_CPU_TEMP = 5, | ||
388 | |||
389 | /** Board temperature, in degrees Kelvin. The value | ||
390 | * HV_SYSCONF_TEMP_KTOC may be subtracted from this to get degrees | ||
391 | * Celsius. If that Celsius value is HV_SYSCONF_OVERTEMP, this indicates | ||
392 | * that the temperature has hit an upper limit and is no longer being | ||
393 | * accurately tracked. | ||
394 | */ | ||
395 | HV_SYSCONF_BOARD_TEMP = 6 | ||
396 | |||
397 | } HV_SysconfQuery; | ||
398 | |||
399 | /** Offset to subtract from returned Kelvin temperature to get degrees | ||
400 | Celsius. */ | ||
401 | #define HV_SYSCONF_TEMP_KTOC 273 | ||
402 | |||
403 | /** Pseudo-temperature value indicating that the temperature has | ||
404 | * pegged at its upper limit and is no longer accurate; note that this is | ||
405 | * the value after subtracting HV_SYSCONF_TEMP_KTOC. */ | ||
406 | #define HV_SYSCONF_OVERTEMP 999 | ||
407 | |||
408 | /** Query a configuration value from the hypervisor. | ||
409 | * @param query Which value is requested (HV_SYSCONF_xxx). | ||
410 | * @return The requested value, or -1 if the requested value is illegal or | ||
411 | * unavailable. | ||
412 | */ | ||
413 | long hv_sysconf(HV_SysconfQuery query); | ||
414 | |||
415 | |||
416 | /** Queries we can make for hv_confstr(). | ||
417 | * | ||
418 | * These numbers are part of the binary API and guaranteed not to change. | ||
419 | */ | ||
420 | typedef enum { | ||
421 | /** An invalid value; do not use. */ | ||
422 | _HV_CONFSTR_RESERVED = 0, | ||
423 | |||
424 | /** Board part number. */ | ||
425 | HV_CONFSTR_BOARD_PART_NUM = 1, | ||
426 | |||
427 | /** Board serial number. */ | ||
428 | HV_CONFSTR_BOARD_SERIAL_NUM = 2, | ||
429 | |||
430 | /** Chip serial number. */ | ||
431 | HV_CONFSTR_CHIP_SERIAL_NUM = 3, | ||
432 | |||
433 | /** Board revision level. */ | ||
434 | HV_CONFSTR_BOARD_REV = 4, | ||
435 | |||
436 | /** Hypervisor software version. */ | ||
437 | HV_CONFSTR_HV_SW_VER = 5, | ||
438 | |||
439 | /** The name for this chip model. */ | ||
440 | HV_CONFSTR_CHIP_MODEL = 6, | ||
441 | |||
442 | /** Human-readable board description. */ | ||
443 | HV_CONFSTR_BOARD_DESC = 7, | ||
444 | |||
445 | /** Human-readable description of the hypervisor configuration. */ | ||
446 | HV_CONFSTR_HV_CONFIG = 8, | ||
447 | |||
448 | /** Human-readable version string for the boot image (for instance, | ||
449 | * who built it and when, what configuration file was used). */ | ||
450 | HV_CONFSTR_HV_CONFIG_VER = 9, | ||
451 | |||
452 | /** Mezzanine part number. */ | ||
453 | HV_CONFSTR_MEZZ_PART_NUM = 10, | ||
454 | |||
455 | /** Mezzanine serial number. */ | ||
456 | HV_CONFSTR_MEZZ_SERIAL_NUM = 11, | ||
457 | |||
458 | /** Mezzanine revision level. */ | ||
459 | HV_CONFSTR_MEZZ_REV = 12, | ||
460 | |||
461 | /** Human-readable mezzanine description. */ | ||
462 | HV_CONFSTR_MEZZ_DESC = 13, | ||
463 | |||
464 | /** Control path for the onboard network switch. */ | ||
465 | HV_CONFSTR_SWITCH_CONTROL = 14, | ||
466 | |||
467 | /** Chip revision level. */ | ||
468 | HV_CONFSTR_CHIP_REV = 15 | ||
469 | |||
470 | } HV_ConfstrQuery; | ||
471 | |||
472 | /** Query a configuration string from the hypervisor. | ||
473 | * | ||
474 | * @param query Identifier for the specific string to be retrieved | ||
475 | * (HV_CONFSTR_xxx). | ||
476 | * @param buf Buffer in which to place the string. | ||
477 | * @param len Length of the buffer. | ||
478 | * @return If query is valid, then the length of the corresponding string, | ||
479 | * including the trailing null; if this is greater than len, the string | ||
480 | * was truncated. If query is invalid, HV_EINVAL. If the specified | ||
481 | * buffer is not writable by the client, HV_EFAULT. | ||
482 | */ | ||
483 | int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len); | ||
484 | |||
485 | /** Tile coordinate */ | ||
486 | typedef struct | ||
487 | { | ||
488 | /** X coordinate, relative to supervisor's top-left coordinate */ | ||
489 | int x; | ||
490 | |||
491 | /** Y coordinate, relative to supervisor's top-left coordinate */ | ||
492 | int y; | ||
493 | } HV_Coord; | ||
494 | |||
495 | |||
496 | #if CHIP_HAS_IPI() | ||
497 | |||
498 | /** Get the PTE for sending an IPI to a particular tile. | ||
499 | * | ||
500 | * @param tile Tile which will receive the IPI. | ||
501 | * @param pl Indicates which IPI registers: 0 = IPI_0, 1 = IPI_1. | ||
502 | * @param pte Filled with resulting PTE. | ||
503 | * @result Zero if no error, non-zero for invalid parameters. | ||
504 | */ | ||
505 | int hv_get_ipi_pte(HV_Coord tile, int pl, HV_PTE* pte); | ||
506 | |||
507 | #else /* !CHIP_HAS_IPI() */ | ||
508 | |||
509 | /** A set of interrupts. */ | ||
510 | typedef __hv32 HV_IntrMask; | ||
511 | |||
512 | /** The low interrupt numbers are reserved for use by the client in | ||
513 | * delivering IPIs. Any interrupt numbers higher than this value are | ||
514 | * reserved for use by HV device drivers. */ | ||
515 | #define HV_MAX_IPI_INTERRUPT 7 | ||
516 | |||
517 | /** Enable a set of device interrupts. | ||
518 | * | ||
519 | * @param enab_mask Bitmap of interrupts to enable. | ||
520 | */ | ||
521 | void hv_enable_intr(HV_IntrMask enab_mask); | ||
522 | |||
523 | /** Disable a set of device interrupts. | ||
524 | * | ||
525 | * @param disab_mask Bitmap of interrupts to disable. | ||
526 | */ | ||
527 | void hv_disable_intr(HV_IntrMask disab_mask); | ||
528 | |||
529 | /** Clear a set of device interrupts. | ||
530 | * | ||
531 | * @param clear_mask Bitmap of interrupts to clear. | ||
532 | */ | ||
533 | void hv_clear_intr(HV_IntrMask clear_mask); | ||
534 | |||
535 | /** Assert a set of device interrupts. | ||
536 | * | ||
537 | * @param assert_mask Bitmap of interrupts to assert. | ||
538 | */ | ||
539 | void hv_assert_intr(HV_IntrMask assert_mask); | ||
540 | |||
541 | /** Trigger a one-shot interrupt on some tile | ||
542 | * | ||
543 | * @param tile Which tile to interrupt. | ||
544 | * @param interrupt Interrupt number to trigger; must be between 0 and | ||
545 | * HV_MAX_IPI_INTERRUPT. | ||
546 | * @return HV_OK on success, or a hypervisor error code. | ||
547 | */ | ||
548 | HV_Errno hv_trigger_ipi(HV_Coord tile, int interrupt); | ||
549 | |||
550 | #endif /* !CHIP_HAS_IPI() */ | ||
551 | |||
552 | /** Store memory mapping in debug memory so that external debugger can read it. | ||
553 | * A maximum of 16 entries can be stored. | ||
554 | * | ||
555 | * @param va VA of memory that is mapped. | ||
556 | * @param len Length of mapped memory. | ||
557 | * @param pa PA of memory that is mapped. | ||
558 | * @return 0 on success, -1 if the maximum number of mappings is exceeded. | ||
559 | */ | ||
560 | int hv_store_mapping(HV_VirtAddr va, unsigned int len, HV_PhysAddr pa); | ||
561 | |||
562 | /** Given a client PA and a length, return its real (HV) PA. | ||
563 | * | ||
564 | * @param cpa Client physical address. | ||
565 | * @param len Length of mapped memory. | ||
566 | * @return physical address, or -1 if cpa or len is not valid. | ||
567 | */ | ||
568 | HV_PhysAddr hv_inquire_realpa(HV_PhysAddr cpa, unsigned int len); | ||
569 | |||
570 | /** RTC return flag for no RTC chip present. | ||
571 | */ | ||
572 | #define HV_RTC_NO_CHIP 0x1 | ||
573 | |||
574 | /** RTC return flag for low-voltage condition, indicating that the battery | ||
575 | * has died and the time read is unreliable. | ||
576 | */ | ||
577 | #define HV_RTC_LOW_VOLTAGE 0x2 | ||
578 | |||
579 | /** Date/Time of day */ | ||
580 | typedef struct { | ||
581 | #if CHIP_WORD_SIZE() > 32 | ||
582 | __hv64 tm_sec; /**< Seconds, 0-59 */ | ||
583 | __hv64 tm_min; /**< Minutes, 0-59 */ | ||
584 | __hv64 tm_hour; /**< Hours, 0-23 */ | ||
585 | __hv64 tm_mday; /**< Day of month, 0-30 (zero-based, unlike struct tm's 1-31) */ | ||
586 | __hv64 tm_mon; /**< Month, 0-11 */ | ||
587 | __hv64 tm_year; /**< Years since 1900, 0-199 */ | ||
588 | __hv64 flags; /**< Return flags, 0 if no error */ | ||
589 | #else | ||
590 | __hv32 tm_sec; /**< Seconds, 0-59 */ | ||
591 | __hv32 tm_min; /**< Minutes, 0-59 */ | ||
592 | __hv32 tm_hour; /**< Hours, 0-23 */ | ||
593 | __hv32 tm_mday; /**< Day of month, 0-30 (zero-based, unlike struct tm's 1-31) */ | ||
594 | __hv32 tm_mon; /**< Month, 0-11 */ | ||
595 | __hv32 tm_year; /**< Years since 1900, 0-199 */ | ||
596 | __hv32 flags; /**< Return flags, 0 if no error */ | ||
597 | #endif | ||
598 | } HV_RTCTime; | ||
599 | |||
600 | /** Read the current time-of-day clock. | ||
601 | * @return HV_RTCTime of current time (GMT). | ||
602 | */ | ||
603 | HV_RTCTime hv_get_rtc(void); | ||
604 | |||
605 | |||
606 | /** Set the current time-of-day clock. | ||
607 | * @param time time to reset time-of-day to (GMT). | ||
608 | */ | ||
609 | void hv_set_rtc(HV_RTCTime time); | ||
610 | |||
611 | /** Installs a context, comprising a page table and other attributes. | ||
612 | * | ||
613 | * Once this service completes, page_table will be used to translate | ||
614 | * subsequent virtual address references to physical memory. | ||
615 | * | ||
616 | * Installing a context does not cause an implicit TLB flush. Before | ||
617 | * reusing an ASID value for a different address space, the client is | ||
618 | * expected to flush old references from the TLB with hv_flush_asid(). | ||
619 | * (Alternately, hv_flush_all() may be used to flush many ASIDs at once.) | ||
620 | * After invalidating a page table entry, changing its attributes, or | ||
621 | * changing its target CPA, the client is expected to flush old references | ||
622 | * from the TLB with hv_flush_page() or hv_flush_pages(). Making a | ||
623 | * previously invalid page valid does not require a flush. | ||
624 | * | ||
625 | * Specifying an invalid ASID, or an invalid CPA (client physical address) | ||
626 | * (either as page_table_pointer, or within the referenced table), | ||
627 | * or another page table data item documented as above as illegal may | ||
628 | * lead to client termination; since the validation of the table is | ||
629 | * done as needed, this may happen before the service returns, or at | ||
630 | * some later time, or never, depending upon the client's pattern of | ||
631 | * memory references. Page table entries which supply translations for | ||
632 | * invalid virtual addresses may result in client termination, or may | ||
633 | * be silently ignored. "Invalid" in this context means a value which | ||
634 | * was not provided to the client via the appropriate hv_inquire_* routine. | ||
635 | * | ||
636 | * To support changing the instruction VAs at the same time as | ||
637 | * installing the new page table, this call explicitly supports | ||
638 | * setting the "lr" register to a different address and then jumping | ||
639 | * directly to the hv_install_context() routine. In this case, the | ||
640 | * new page table does not need to contain any mapping for the | ||
641 | * hv_install_context address itself. | ||
642 | * | ||
643 | * @param page_table Root of the page table. | ||
644 | * @param access PTE providing info on how to read the page table. This | ||
645 | * value must be consistent between multiple tiles sharing a page table, | ||
646 | * and must also be consistent with any virtual mappings the client | ||
647 | * may be using to access the page table. | ||
648 | * @param asid HV_ASID the page table is to be used for. | ||
649 | * @param flags Context flags, denoting attributes or privileges of the | ||
650 | * current context (HV_CTX_xxx). | ||
651 | * @return Zero on success, or a hypervisor error code on failure. | ||
652 | */ | ||
653 | int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid, | ||
654 | __hv32 flags); | ||
655 | |||
656 | #endif /* !__ASSEMBLER__ */ | ||
657 | |||
658 | #define HV_CTX_DIRECTIO 0x1 /**< Direct I/O requests are accepted from | ||
659 | PL0. */ | ||
660 | |||
661 | #ifndef __ASSEMBLER__ | ||
662 | |||
663 | /** Value returned from hv_inquire_context(). */ | ||
664 | typedef struct | ||
665 | { | ||
666 | /** Physical address of page table */ | ||
667 | HV_PhysAddr page_table; | ||
668 | |||
669 | /** PTE which defines access method for top of page table */ | ||
670 | HV_PTE access; | ||
671 | |||
672 | /** ASID associated with this page table */ | ||
673 | HV_ASID asid; | ||
674 | |||
675 | /** Context flags */ | ||
676 | __hv32 flags; | ||
677 | } HV_Context; | ||
678 | |||
679 | /** Retrieve information about the currently installed context. | ||
680 | * @return The data passed to the last successful hv_install_context call. | ||
681 | */ | ||
682 | HV_Context hv_inquire_context(void); | ||
683 | |||
684 | |||
685 | /** Flushes all translations associated with the named address space | ||
686 | * identifier from the TLB and any other hypervisor data structures. | ||
687 | * Translations installed with the "global" bit are not flushed. | ||
688 | * | ||
689 | * Specifying an invalid ASID may lead to client termination. "Invalid" | ||
690 | * in this context means a value which was not provided to the client | ||
691 | * via <tt>hv_inquire_asid()</tt>. | ||
692 | * | ||
693 | * @param asid HV_ASID whose entries are to be flushed. | ||
694 | * @return Zero on success, or a hypervisor error code on failure. | ||
695 | */ | ||
696 | int hv_flush_asid(HV_ASID asid); | ||
697 | |||
698 | |||
699 | /** Flushes all translations associated with the named virtual address | ||
700 | * and page size from the TLB and other hypervisor data structures. Only | ||
701 | * pages visible to the current ASID are affected; note that this includes | ||
702 | * global pages in addition to pages specific to the current ASID. | ||
703 | * | ||
704 | * The supplied VA need not be aligned; it may be anywhere in the | ||
705 | * subject page. | ||
706 | * | ||
707 | * Specifying an invalid virtual address may lead to client termination, | ||
708 | * or may silently succeed. "Invalid" in this context means a value | ||
709 | * which was not provided to the client via hv_inquire_virtual. | ||
710 | * | ||
711 | * @param address Address of the page to flush. | ||
712 | * @param page_size Size of pages to assume. | ||
713 | * @return Zero on success, or a hypervisor error code on failure. | ||
714 | */ | ||
715 | int hv_flush_page(HV_VirtAddr address, HV_PageSize page_size); | ||
716 | |||
717 | |||
718 | /** Flushes all translations associated with the named virtual address range | ||
719 | * and page size from the TLB and other hypervisor data structures. Only | ||
720 | * pages visible to the current ASID are affected; note that this includes | ||
721 | * global pages in addition to pages specific to the current ASID. | ||
722 | * | ||
723 | * The supplied VA need not be aligned; it may be anywhere in the | ||
724 | * subject page. | ||
725 | * | ||
726 | * Specifying an invalid virtual address may lead to client termination, | ||
727 | * or may silently succeed. "Invalid" in this context means a value | ||
728 | * which was not provided to the client via hv_inquire_virtual. | ||
729 | * | ||
730 | * @param start Address to flush. | ||
731 | * @param page_size Size of pages to assume. | ||
732 | * @param size The number of bytes to flush. Any page in the range | ||
733 | * [start, start + size) will be flushed from the TLB. | ||
734 | * @return Zero on success, or a hypervisor error code on failure. | ||
735 | */ | ||
736 | int hv_flush_pages(HV_VirtAddr start, HV_PageSize page_size, | ||
737 | unsigned long size); | ||
738 | |||
739 | |||
740 | /** Flushes all non-global translations (if preserve_global is true), | ||
741 | * or absolutely all translations (if preserve_global is false). | ||
742 | * | ||
743 | * @param preserve_global Non-zero if we want to preserve "global" mappings. | ||
744 | * @return Zero on success, or a hypervisor error code on failure. | ||
745 | */ | ||
746 | int hv_flush_all(int preserve_global); | ||
747 | |||
748 | |||
749 | /** Restart machine with optional restart command and optional args. | ||
750 | * @param cmd Const pointer to command to restart with, or NULL | ||
751 | * @param args Const pointer to argument string to restart with, or NULL | ||
752 | */ | ||
753 | void hv_restart(HV_VirtAddr cmd, HV_VirtAddr args); | ||
754 | |||
755 | |||
756 | /** Halt machine. */ | ||
757 | void hv_halt(void); | ||
758 | |||
759 | |||
760 | /** Power off machine. */ | ||
761 | void hv_power_off(void); | ||
762 | |||
763 | |||
764 | /** Re-enter virtual-is-physical memory translation mode and restart | ||
765 | * execution at a given address. | ||
766 | * @param entry Client physical address at which to begin execution. | ||
767 | * @return A hypervisor error code on failure; if the operation is | ||
768 | * successful the call does not return. | ||
769 | */ | ||
770 | int hv_reexec(HV_PhysAddr entry); | ||
771 | |||
772 | |||
773 | /** Chip topology */ | ||
774 | typedef struct | ||
775 | { | ||
776 | /** Relative coordinates of the querying tile */ | ||
777 | HV_Coord coord; | ||
778 | |||
779 | /** Width of the querying supervisor's tile rectangle. */ | ||
780 | int width; | ||
781 | |||
782 | /** Height of the querying supervisor's tile rectangle. */ | ||
783 | int height; | ||
784 | |||
785 | } HV_Topology; | ||
786 | |||
787 | /** Returns information about the tile coordinate system. | ||
788 | * | ||
789 | * Each supervisor is given a rectangle of tiles it potentially controls. | ||
790 | * These tiles are labeled using a relative coordinate system with (0,0) as | ||
791 | * the upper left tile regardless of their physical location on the chip. | ||
792 | * | ||
793 | * This call returns both the size of that rectangle and the position | ||
794 | * within that rectangle of the querying tile. | ||
795 | * | ||
796 | * Not all tiles within that rectangle may be available to the supervisor; | ||
797 | * to get the precise set of available tiles, you must also call | ||
798 | * hv_inquire_tiles(HV_INQ_TILES_AVAIL, ...). | ||
799 | **/ | ||
800 | HV_Topology hv_inquire_topology(void); | ||
801 | |||
802 | /** Sets of tiles we can retrieve with hv_inquire_tiles(). | ||
803 | * | ||
804 | * These numbers are part of the binary API and guaranteed not to change. | ||
805 | */ | ||
806 | typedef enum { | ||
807 | /** An invalid value; do not use. */ | ||
808 | _HV_INQ_TILES_RESERVED = 0, | ||
809 | |||
810 | /** All available tiles within the supervisor's tile rectangle. */ | ||
811 | HV_INQ_TILES_AVAIL = 1, | ||
812 | |||
813 | /** The set of tiles used for hash-for-home caching. */ | ||
814 | HV_INQ_TILES_HFH_CACHE = 2, | ||
815 | |||
816 | /** The set of tiles that can be legally used as a LOTAR for a PTE. */ | ||
817 | HV_INQ_TILES_LOTAR = 3 | ||
818 | } HV_InqTileSet; | ||
819 | |||
820 | /** Returns specific information about various sets of tiles within the | ||
821 | * supervisor's tile rectangle. | ||
822 | * | ||
823 | * @param set Which set of tiles to retrieve. | ||
824 | * @param cpumask Pointer to a returned bitmask (in row-major order, | ||
825 | * supervisor-relative) of tiles. The low bit of the first word | ||
826 | * corresponds to the tile at the upper left-hand corner of the | ||
827 | * supervisor's rectangle. In order for the supervisor to know the | ||
828 | * buffer length to supply, it should first call hv_inquire_topology. | ||
829 | * @param length Number of bytes available for the returned bitmask. | ||
830 | **/ | ||
831 | HV_Errno hv_inquire_tiles(HV_InqTileSet set, HV_VirtAddr cpumask, int length); | ||
832 | |||
833 | |||
834 | /** An identifier for a memory controller. Multiple memory controllers | ||
835 | * may be connected to one chip, and this uniquely identifies each one. | ||
836 | */ | ||
837 | typedef int HV_MemoryController; | ||
838 | |||
839 | /** A range of physical memory. */ | ||
840 | typedef struct | ||
841 | { | ||
842 | HV_PhysAddr start; /**< Starting address. */ | ||
843 | __hv64 size; /**< Size in bytes. */ | ||
844 | HV_MemoryController controller; /**< Which memory controller owns this. */ | ||
845 | } HV_PhysAddrRange; | ||
846 | |||
847 | /** Returns information about a range of physical memory. | ||
848 | * | ||
849 | * hv_inquire_physical() returns one of the ranges of client | ||
850 | * physical addresses which are available to this client. | ||
851 | * | ||
852 | * The first range is retrieved by specifying an idx of 0, and | ||
853 | * successive ranges are returned with subsequent idx values. Ranges | ||
854 | * are ordered by increasing start address (i.e., as idx increases, | ||
855 | * so does start), do not overlap, and do not touch (i.e., the | ||
856 | * available memory is described with the fewest possible ranges). | ||
857 | * | ||
858 | * If an out-of-range idx value is specified, the returned size will be zero. | ||
859 | * A client can count the number of ranges by increasing idx until the | ||
860 | * returned size is zero. There will always be at least one valid range. | ||
861 | * | ||
862 | * Some clients might not be prepared to deal with more than one | ||
863 | * physical address range; they still ought to call this routine and | ||
864 | * issue a warning message if they're given more than one range, on the | ||
865 | * theory that whoever configured the hypervisor to provide that memory | ||
866 | * should know that it's being wasted. | ||
867 | */ | ||
868 | HV_PhysAddrRange hv_inquire_physical(int idx); | ||
869 | |||
870 | |||
871 | /** Memory controller information. */ | ||
872 | typedef struct | ||
873 | { | ||
874 | HV_Coord coord; /**< Relative tile coordinates of the port used by a | ||
875 | specified tile to communicate with this controller. */ | ||
876 | __hv64 speed; /**< Speed of this controller in bytes per second. */ | ||
877 | } HV_MemoryControllerInfo; | ||
878 | |||
879 | /** Returns information about a particular memory controller. | ||
880 | * | ||
881 | * hv_inquire_memory_controller(coord,idx) returns information about a | ||
882 | * particular controller. Two pieces of information are returned: | ||
883 | * - The relative coordinates of the port on the controller that the specified | ||
884 | * tile would use to contact it. The relative coordinates may lie | ||
885 | * outside the supervisor's rectangle, i.e. the controller may not | ||
886 | * be attached to a node managed by the querying node's supervisor. | ||
887 | * In particular note that x or y may be negative. | ||
888 | * - The speed of the memory controller. (This is a not-to-exceed value | ||
889 | * based on the raw hardware data rate, and may not be achievable in | ||
890 | * practice; it is provided to give clients information on the relative | ||
891 | * performance of the available controllers.) | ||
892 | * | ||
893 | * Clients should avoid calling this interface with invalid values. | ||
894 | * A client who does may be terminated. | ||
895 | * @param coord Tile for which to calculate the relative port position. | ||
896 | * @param controller Index of the controller; identical to value returned | ||
897 | * from other routines like hv_inquire_physical. | ||
898 | * @return Information about the controller. | ||
899 | */ | ||
900 | HV_MemoryControllerInfo hv_inquire_memory_controller(HV_Coord coord, | ||
901 | int controller); | ||
902 | |||
903 | |||
904 | /** A range of virtual memory. */ | ||
905 | typedef struct | ||
906 | { | ||
907 | HV_VirtAddr start; /**< Starting address. */ | ||
908 | __hv64 size; /**< Size in bytes. */ | ||
909 | } HV_VirtAddrRange; | ||
910 | |||
911 | /** Returns information about a range of virtual memory. | ||
912 | * | ||
913 | * hv_inquire_virtual() returns one of the ranges of client | ||
914 | * virtual addresses which are available to this client. | ||
915 | * | ||
916 | * The first range is retrieved by specifying an idx of 0, and | ||
917 | * successive ranges are returned with subsequent idx values. Ranges | ||
918 | * are ordered by increasing start address (i.e., as idx increases, | ||
919 | * so does start), do not overlap, and do not touch (i.e., the | ||
920 | * available memory is described with the fewest possible ranges). | ||
921 | * | ||
922 | * If an out-of-range idx value is specified, the returned size will be zero. | ||
923 | * A client can count the number of ranges by increasing idx until the | ||
924 | * returned size is zero. There will always be at least one valid range. | ||
925 | * | ||
926 | * Some clients may well have various virtual addresses hardwired | ||
927 | * into themselves; for instance, their instruction stream may | ||
928 | * have been compiled expecting to live at a particular address. | ||
929 | * Such clients should use this interface to verify they've been | ||
930 | * given the virtual address space they expect, and issue a (potentially | ||
931 | * fatal) warning message otherwise. | ||
932 | * | ||
933 | * Note that the returned size is a __hv64, not a __hv32, so it is | ||
934 | * possible to express a single range spanning the entire 32-bit | ||
935 | * address space. | ||
936 | */ | ||
937 | HV_VirtAddrRange hv_inquire_virtual(int idx); | ||
938 | |||
939 | |||
940 | /** A range of ASID values. */ | ||
941 | typedef struct | ||
942 | { | ||
943 | HV_ASID start; /**< First ASID in the range. */ | ||
944 | unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */ | ||
945 | } HV_ASIDRange; | ||
946 | |||
947 | /** Returns information about a range of ASIDs. | ||
948 | * | ||
949 | * hv_inquire_asid() returns one of the ranges of address | ||
950 | * space identifiers which are available to this client. | ||
951 | * | ||
952 | * The first range is retrieved by specifying an idx of 0, and | ||
953 | * successive ranges are returned with subsequent idx values. Ranges | ||
954 | * are ordered by increasing start value (i.e., as idx increases, | ||
955 | * so does start), do not overlap, and do not touch (i.e., the | ||
956 | * available ASIDs are described with the fewest possible ranges). | ||
957 | * | ||
958 | * If an out-of-range idx value is specified, the returned size will be zero. | ||
959 | * A client can count the number of ranges by increasing idx until the | ||
960 | * returned size is zero. There will always be at least one valid range. | ||
961 | */ | ||
962 | HV_ASIDRange hv_inquire_asid(int idx); | ||
963 | |||
964 | |||
965 | /** Waits for at least the specified number of nanoseconds then returns. | ||
966 | * | ||
967 | * @param nanosecs The number of nanoseconds to sleep. | ||
968 | */ | ||
969 | void hv_nanosleep(int nanosecs); | ||
970 | |||
971 | |||
972 | /** Reads a character from the console without blocking. | ||
973 | * | ||
974 | * @return A value from 0-255 indicates the value successfully read. | ||
975 | * A negative value means no value was ready. | ||
976 | */ | ||
977 | int hv_console_read_if_ready(void); | ||
978 | |||
979 | |||
980 | /** Writes a character to the console, blocking if the console is busy. | ||
981 | * | ||
982 | * This call cannot fail. If the console is broken for some reason, | ||
983 | * output will simply vanish. | ||
984 | * @param byte Character to write. | ||
985 | */ | ||
986 | void hv_console_putc(int byte); | ||
987 | |||
988 | |||
989 | /** Writes a string to the console, blocking if the console is busy. | ||
990 | * @param bytes Pointer to characters to write. | ||
991 | * @param len Number of characters to write. | ||
992 | * @return Number of characters written, or HV_EFAULT if the buffer is invalid. | ||
993 | */ | ||
994 | int hv_console_write(HV_VirtAddr bytes, int len); | ||
995 | |||
996 | |||
997 | /** Dispatch the next interrupt from the client downcall mechanism. | ||
998 | * | ||
999 | * The hypervisor uses downcalls to notify the client of asynchronous | ||
1000 | * events. Some of these events are hypervisor-created (like incoming | ||
1001 | * messages). Some are regular interrupts which initially occur in | ||
1002 | * the hypervisor, and are normally handled directly by the client; | ||
1003 | * when these occur in a client's interrupt critical section, they must | ||
1004 | * be delivered through the downcall mechanism. | ||
1005 | * | ||
1006 | * A downcall is initially delivered to the client as an INTCTRL_1 | ||
1007 | * interrupt. Upon entry to the INTCTRL_1 vector, the client must | ||
1008 | * immediately invoke the hv_downcall_dispatch service. This service | ||
1009 | * will not return; instead it will cause one of the client's actual | ||
1010 | * downcall-handling interrupt vectors to be entered. The EX_CONTEXT | ||
1011 | * registers in the client will be set so that when the client irets, | ||
1012 | * it will return to the code which was interrupted by the INTCTRL_1 | ||
1013 | * interrupt. | ||
1014 | * | ||
1015 | * Under some circumstances, the firing of INTCTRL_1 can race with | ||
1016 | * the lowering of a device interrupt. In such a case, the | ||
1017 | * hv_downcall_dispatch service may issue an iret instruction instead | ||
1018 | * of entering one of the client's actual downcall-handling interrupt | ||
1019 | * vectors. This will return execution to the location that was | ||
1020 | * interrupted by INTCTRL_1. | ||
1021 | * | ||
1022 | * Any saving of registers should be done by the actual handling | ||
1023 | * vectors; no registers should be changed by the INTCTRL_1 handler. | ||
1024 | * In particular, the client should not use a jal instruction to invoke | ||
1025 | * the hv_downcall_dispatch service, as that would overwrite the client's | ||
1026 | * lr register. Note that the hv_downcall_dispatch service may overwrite | ||
1027 | * one or more of the client's system save registers. | ||
1028 | * | ||
1029 | * The client must not modify the INTCTRL_1_STATUS SPR. The hypervisor | ||
1030 | * will set this register to cause a downcall to happen, and will clear | ||
1031 | * it when no further downcalls are pending. | ||
1032 | * | ||
1033 | * When a downcall vector is entered, the INTCTRL_1 interrupt will be | ||
1034 | * masked. When the client is done processing a downcall, and is ready | ||
1035 | * to accept another, it must unmask this interrupt; if more downcalls | ||
1036 | * are pending, this will cause the INTCTRL_1 vector to be reentered. | ||
1037 | * Currently the following interrupt vectors can be entered through a | ||
1038 | * downcall: | ||
1039 | * | ||
1040 | * INT_MESSAGE_RCV_DWNCL (hypervisor message available) | ||
1041 | * INT_DMATLB_MISS_DWNCL (DMA TLB miss) | ||
1042 | * INT_SNITLB_MISS_DWNCL (SNI TLB miss) | ||
1043 | * INT_DMATLB_ACCESS_DWNCL (DMA TLB access violation) | ||
1044 | */ | ||
1045 | void hv_downcall_dispatch(void); | ||
1046 | |||
1047 | #endif /* !__ASSEMBLER__ */ | ||
1048 | |||
1049 | /** We use actual interrupt vectors which never occur (they're only there | ||
1050 | * to allow setting MPLs for related SPRs) for our downcall vectors. | ||
1051 | */ | ||
1052 | /** Message receive downcall interrupt vector */ | ||
1053 | #define INT_MESSAGE_RCV_DWNCL INT_BOOT_ACCESS | ||
1054 | /** DMA TLB miss downcall interrupt vector */ | ||
1055 | #define INT_DMATLB_MISS_DWNCL INT_DMA_ASID | ||
1056 | /** Static network processor instruction TLB miss interrupt vector */ | ||
1057 | #define INT_SNITLB_MISS_DWNCL INT_SNI_ASID | ||
1058 | /** DMA TLB access violation downcall interrupt vector */ | ||
1059 | #define INT_DMATLB_ACCESS_DWNCL INT_DMA_CPL | ||
1060 | /** Device interrupt downcall interrupt vector */ | ||
1061 | #define INT_DEV_INTR_DWNCL INT_WORLD_ACCESS | ||
1062 | |||
1063 | #ifndef __ASSEMBLER__ | ||
1064 | |||
1065 | /** Requests the inode for a specific full pathname. | ||
1066 | * | ||
1067 | * Performs a lookup in the hypervisor filesystem for a given filename. | ||
1068 | * Multiple calls with the same filename will always return the same inode. | ||
1069 | * If there is no such filename, HV_ENOENT is returned. | ||
1070 | * A bad filename pointer may result in HV_EFAULT instead. | ||
1071 | * | ||
1072 | * @param filename Constant pointer to name of requested file | ||
1073 | * @return Inode of requested file | ||
1074 | */ | ||
1075 | int hv_fs_findfile(HV_VirtAddr filename); | ||
1076 | |||
1077 | |||
1078 | /** Data returned from an fstat request. | ||
1079 | * Note that this structure should be no more than 40 bytes in size so | ||
1080 | * that it can always be returned completely in registers. | ||
1081 | */ | ||
1082 | typedef struct | ||
1083 | { | ||
1084 | int size; /**< Size of file (or HV_Errno on error) */ | ||
1085 | unsigned int flags; /**< Flags (see HV_FS_FSTAT_FLAGS) */ | ||
1086 | } HV_FS_StatInfo; | ||
1087 | |||
1088 | /** Bitmask flags for fstat request */ | ||
1089 | typedef enum | ||
1090 | { | ||
1091 | HV_FS_ISDIR = 0x0001 /**< Is the entry a directory? */ | ||
1092 | } HV_FS_FSTAT_FLAGS; | ||
1093 | |||
1094 | /** Get stat information on a given file inode. | ||
1095 | * | ||
1096 | * Return information on the file with the given inode. | ||
1097 | * | ||
1098 | * If the HV_FS_ISDIR bit is set, the "file" is a directory. Reading | ||
1099 | * it will return NUL-separated filenames (no directory part) relative | ||
1100 | * to the path to the inode of the directory "file". These can be | ||
1101 | * appended to the path to the directory "file" after a forward slash | ||
1102 | * to create additional filenames. Note that it is not required | ||
1103 | * that all valid paths be decomposable into valid parent directories; | ||
1104 | * a filesystem may validly have just a few files, none of which have | ||
1105 | * HV_FS_ISDIR set. However, if clients wish to enumerate the | ||
1106 | * files in the filesystem, it is recommended to include all the | ||
1107 | * appropriate parent directory "files" to give a consistent view. | ||
1108 | * | ||
1109 | * An invalid file inode will cause an HV_EBADF error to be returned. | ||
1110 | * | ||
1111 | * @param inode The inode number of the query | ||
1112 | * @return An HV_FS_StatInfo structure | ||
1113 | */ | ||
1114 | HV_FS_StatInfo hv_fs_fstat(int inode); | ||
1115 | |||
1116 | |||
1117 | /** Read data from a specific hypervisor file. | ||
1118 | * On error, may return HV_EBADF for a bad inode or HV_EFAULT for a bad buf. | ||
1119 | * Reads near the end of the file will return fewer bytes than requested. | ||
1120 | * Reads at or beyond the end of a file will return zero. | ||
1121 | * | ||
1122 | * @param inode the hypervisor file to read | ||
1123 | * @param buf the buffer to read data into | ||
1124 | * @param length the number of bytes of data to read | ||
1125 | * @param offset the offset into the file to read the data from | ||
1126 | * @return number of bytes successfully read, or an HV_Errno code | ||
1127 | */ | ||
1128 | int hv_fs_pread(int inode, HV_VirtAddr buf, int length, int offset); | ||
1129 | |||
1130 | |||
1131 | /** Read a 64-bit word from the specified physical address. | ||
1132 | * The address must be 8-byte aligned. | ||
1133 | * Specifying an invalid physical address will lead to client termination. | ||
1134 | * @param addr The physical address to read | ||
1135 | * @param access The PTE describing how to read the memory | ||
1136 | * @return The 64-bit value read from the given address | ||
1137 | */ | ||
1138 | unsigned long long hv_physaddr_read64(HV_PhysAddr addr, HV_PTE access); | ||
1139 | |||
1140 | |||
1141 | /** Write a 64-bit word to the specified physical address. | ||
1142 | * The address must be 8-byte aligned. | ||
1143 | * Specifying an invalid physical address will lead to client termination. | ||
1144 | * @param addr The physical address to write | ||
1145 | * @param access The PTE that says how to write the memory | ||
1146 | * @param val The 64-bit value to write to the given address | ||
1147 | */ | ||
1148 | void hv_physaddr_write64(HV_PhysAddr addr, HV_PTE access, | ||
1149 | unsigned long long val); | ||
1150 | |||
1151 | |||
1152 | /** Get the value of the command-line for the supervisor, if any. | ||
1153 | * This will not include the filename of the booted supervisor, but may | ||
1154 | * include configured-in boot arguments or the hv_restart() arguments. | ||
1155 | * If the buffer is not long enough the hypervisor will NUL the first | ||
1156 | * character of the buffer but not write any other data. | ||
1157 | * @param buf The virtual address to write the command-line string to. | ||
1158 | * @param length The length of buf, in characters. | ||
1159 | * @return The actual length of the command line, including the trailing NUL | ||
1160 | * (may be larger than "length"). | ||
1161 | */ | ||
1162 | int hv_get_command_line(HV_VirtAddr buf, int length); | ||
1163 | |||
1164 | |||
1165 | /** Set a new value for the command-line for the supervisor, which will | ||
1166 | * be returned from subsequent invocations of hv_get_command_line() on | ||
1167 | * this tile. | ||
1168 | * @param buf The virtual address to read the command-line string from. | ||
1169 | * @param length The length of buf, in characters; must be no more than | ||
1170 | * HV_COMMAND_LINE_LEN. | ||
1171 | * @return Zero if successful, or a hypervisor error code. | ||
1172 | */ | ||
1173 | HV_Errno hv_set_command_line(HV_VirtAddr buf, int length); | ||
1174 | |||
1175 | /** Maximum size of a command line passed to hv_set_command_line(); note | ||
1176 | * that a line returned from hv_get_command_line() could be larger than | ||
1177 | * this.*/ | ||
1178 | #define HV_COMMAND_LINE_LEN 256 | ||
1179 | |||
1180 | /** Tell the hypervisor how to cache non-priority pages | ||
1181 | * (its own as well as pages explicitly represented in page tables). | ||
1182 | * Normally these will be represented as red/black pages, but | ||
1183 | * when the supervisor starts to allocate "priority" pages in the PTE | ||
1184 | * the hypervisor will need to start marking those pages as (e.g.) "red" | ||
1185 | * and non-priority pages as either "black" (if they cache-alias | ||
1186 | * with the existing priority pages) or "red/black" (if they don't). | ||
1187 | * The bitmask provides information on which parts of the cache | ||
1188 | * have been used for pinned pages so far on this tile; if (1 << N) | ||
1189 | * appears in the bitmask, that indicates that a page has been marked | ||
1190 | * "priority" whose PFN equals N, mod 8. | ||
1191 | * @param bitmask A bitmap of priority page set values | ||
1192 | */ | ||
1193 | void hv_set_caching(unsigned int bitmask); | ||
1194 | |||
1195 | |||
1196 | /** Zero out a specified number of pages. | ||
1197 | * The va and size must both be multiples of 4096. | ||
1198 | * Caches are bypassed and memory is directly set to zero. | ||
1199 | * This API is implemented only in the magic hypervisor and is intended | ||
1200 | * to provide a performance boost to the minimal supervisor by | ||
1201 | * giving it a fast way to zero memory pages when allocating them. | ||
1202 | * @param va Virtual address where the page has been mapped | ||
1203 | * @param size Number of bytes (must be a page size multiple) | ||
1204 | */ | ||
1205 | void hv_bzero_page(HV_VirtAddr va, unsigned int size); | ||
1206 | |||
1207 | |||
1208 | /** State object for the hypervisor messaging subsystem; registered | ||
1209 | typedef struct | ||
1210 | { | ||
1211 | #if CHIP_VA_WIDTH() > 32 | ||
1212 | __hv64 opaque[2]; /**< No user-serviceable parts inside */ | ||
1213 | #else | ||
1214 | __hv32 opaque[2]; /**< No user-serviceable parts inside */ | ||
1215 | #endif | ||
1216 | } | ||
1217 | HV_MsgState; | ||
1218 | |||
1219 | /** Register to receive incoming messages. | ||
1220 | * | ||
1221 | * This routine configures the current tile so that it can receive | ||
1222 | * incoming messages. It must be called before the client can receive | ||
1223 | * messages with the hv_receive_message routine, and must be called on | ||
1224 | * each tile which will receive messages. | ||
1225 | * | ||
1226 | * msgstate is the virtual address of a state object of type HV_MsgState. | ||
1227 | * Once the state is registered, the client must not read or write the | ||
1228 | * state object; doing so will cause undefined results. | ||
1229 | * | ||
1230 | * If this routine is called with msgstate set to 0, the client's message | ||
1231 | * state will be freed and it will no longer be able to receive messages. | ||
1232 | * Note that this may cause the loss of any as-yet-undelivered messages | ||
1233 | * for the client. | ||
1234 | * | ||
1235 | * If another client attempts to send a message to a client which has | ||
1236 | * not yet called hv_register_message_state, or which has freed its | ||
1237 | * message state, the message will not be delivered, as if the client | ||
1238 | * had insufficient buffering. | ||
1239 | * | ||
1240 | * This routine returns HV_OK if the registration was successful, and | ||
1241 | * HV_EINVAL if the supplied state object is unsuitable. Note that some | ||
1242 | * errors may not be detected during this routine, but might be detected | ||
1243 | * during a subsequent message delivery. | ||
1244 | * @param msgstate State object. | ||
1245 | **/ | ||
1246 | HV_Errno hv_register_message_state(HV_MsgState* msgstate); | ||
1247 | |||
1248 | /** Possible message recipient states, reported via HV_Recipient.state. */ | ||
1249 | typedef enum | ||
1250 | { | ||
1251 | HV_TO_BE_SENT, /**< Not sent (not attempted, or recipient not ready) */ | ||
1252 | HV_SENT, /**< Successfully sent */ | ||
1253 | HV_BAD_RECIP /**< Bad recipient coordinates (permanent error) */ | ||
1254 | } HV_Recip_State; | ||
1255 | |||
1256 | /** Message recipient, as passed in the array given to hv_send_message(). */ | ||
1257 | typedef struct | ||
1258 | { | ||
1259 | /** X coordinate, relative to supervisor's top-left coordinate */ | ||
1260 | unsigned int x:11; | ||
1261 | |||
1262 | /** Y coordinate, relative to supervisor's top-left coordinate */ | ||
1263 | unsigned int y:11; | ||
1264 | |||
1265 | /** Status of this recipient; set to HV_TO_BE_SENT before sending */ | ||
1266 | HV_Recip_State state:10; | ||
1267 | } HV_Recipient; | ||
1268 | |||
1269 | /** Send a message to a set of recipients. | ||
1270 | * | ||
1271 | * This routine sends a message to a set of recipients. | ||
1272 | * | ||
1273 | * recips is an array of HV_Recipient structures. Each specifies a tile, | ||
1274 | * and a message state; initially, it is expected that the state will | ||
1275 | * be set to HV_TO_BE_SENT. nrecip specifies the number of recipients | ||
1276 | * in the recips array. | ||
1277 | * | ||
1278 | * For each recipient whose state is HV_TO_BE_SENT, the hypervisor attempts | ||
1279 | * to send that tile the specified message. In order to successfully | ||
1280 | * receive the message, the receiver must be a valid tile to which the | ||
1281 | * sender has access, must not be the sending tile itself, and must have | ||
1282 | * sufficient free buffer space. (The hypervisor guarantees that each | ||
1283 | * tile which has called hv_register_message_state() will be able to | ||
1284 | * buffer one message from every other tile which can legally send to it; | ||
1285 | * more space may be provided but is not guaranteed.) If an invalid tile | ||
1286 | * is specified, the recipient's state is set to HV_BAD_RECIP; this is a | ||
1287 | * permanent delivery error. If the message is successfully delivered | ||
1288 | * to the recipient's buffer, the recipient's state is set to HV_SENT. | ||
1289 | * Otherwise, the recipient's state is unchanged. Message delivery is | ||
1290 | * synchronous; all attempts to send messages are completed before this | ||
1291 | * routine returns. | ||
1292 | * | ||
1293 | * If no permanent delivery errors were encountered, the routine returns | ||
1294 | * the number of messages successfully sent: that is, the number of | ||
1295 | * recipients whose states changed from HV_TO_BE_SENT to HV_SENT during | ||
1296 | * this operation. If any permanent delivery errors were encountered, | ||
1297 | * the routine returns HV_ERECIP. In the event of permanent delivery | ||
1298 | * errors, it may be the case that delivery was not attempted to all | ||
1299 | * recipients; if any messages were successfully delivered, however, | ||
1300 | * recipients' state values will be updated appropriately. | ||
1301 | * | ||
1302 | * It is explicitly legal to specify a recipient structure whose state | ||
1303 | * is not HV_TO_BE_SENT; such a recipient is ignored. One suggested way | ||
1304 | * of using hv_send_message to send a message to multiple tiles is to set | ||
1305 | * up a list of recipients, and then call the routine repeatedly with the | ||
1306 | * same list, each time accumulating the number of messages successfully | ||
1307 | * sent, until all messages are sent, a permanent error is encountered, | ||
1308 | * or the desired number of attempts have been made. When used in this | ||
1309 | * way, the routine will deliver each message no more than once to each | ||
1310 | * recipient. | ||
1311 | * | ||
1312 | * Note that a message being successfully delivered to the recipient's | ||
1313 | * buffer space does not guarantee that it is received by the recipient, | ||
1314 | * either immediately or at any time in the future; the recipient might | ||
1315 | * never call hv_receive_message, or could register a different state | ||
1316 | * buffer, losing the message. | ||
1317 | * | ||
1318 | * Specifying the same recipient more than once in the recipient list | ||
1319 | * is an error, which will not result in an error return but which may | ||
1320 | * or may not result in more than one message being delivered to the | ||
1321 | * recipient tile. | ||
1322 | * | ||
1323 | * buf and buflen specify the message to be sent. buf is a virtual address | ||
1324 | * which must be currently mapped in the client's page table; if not, the | ||
1325 | * routine returns HV_EFAULT. buflen must be greater than zero and less | ||
1326 | * than or equal to HV_MAX_MESSAGE_SIZE, and nrecip must be less than the | ||
1327 | * number of tiles to which the sender has access; if not, the routine | ||
1328 | * returns HV_EINVAL. | ||
1329 | * @param recips List of recipients. | ||
1330 | * @param nrecip Number of recipients. | ||
1331 | * @param buf Address of message data. | ||
1332 | * @param buflen Length of message data. | ||
1333 | **/ | ||
1334 | int hv_send_message(HV_Recipient *recips, int nrecip, | ||
1335 | HV_VirtAddr buf, int buflen); | ||
1336 | |||
1337 | /** Maximum hypervisor message size, in bytes */ | ||
1338 | #define HV_MAX_MESSAGE_SIZE 28 | ||
1339 | |||
1340 | |||
1341 | /** Return value from hv_receive_message() */ | ||
1342 | typedef struct | ||
1343 | { | ||
1344 | int msglen; /**< Message length in bytes, 0 if none, or HV_E2BIG */ | ||
1345 | __hv32 source; /**< Code identifying message sender (HV_MSG_xxx) */ | ||
1346 | } HV_RcvMsgInfo; | ||
1347 | |||
1348 | #define HV_MSG_TILE 0x0 /**< Message source is another tile */ | ||
1349 | #define HV_MSG_INTR 0x1 /**< Message source is a driver interrupt */ | ||
1350 | |||
1351 | /** Receive a message. | ||
1352 | * | ||
1353 | * This routine retrieves a message from the client's incoming message | ||
1354 | * buffer. | ||
1355 | * | ||
1356 | * Multiple messages sent from a particular sending tile to a particular | ||
1357 | * receiving tile are received in the order that they were sent; however, | ||
1358 | * no ordering is guaranteed between messages sent by different tiles. | ||
1359 | * | ||
1360 | * Whenever a client's message buffer is empty, the first message | ||
1361 | * subsequently received will cause the client's MESSAGE_RCV_DWNCL | ||
1362 | * interrupt vector to be invoked through the interrupt downcall mechanism | ||
1363 | * (see the description of the hv_downcall_dispatch() routine for details | ||
1364 | * on downcalls). | ||
1365 | * | ||
1366 | * Another message-available downcall will not occur until a call to | ||
1367 | * this routine is made when the message buffer is empty, and a message | ||
1368 | * subsequently arrives. Note that such a downcall could occur while | ||
1369 | * this routine is executing. If the calling code does not wish this | ||
1370 | * to happen, it is recommended that this routine be called with the | ||
1371 | * INTCTRL_1 interrupt masked, or inside an interrupt critical section. | ||
1372 | * | ||
1373 | * msgstate is the value previously passed to hv_register_message_state(). | ||
1374 | * buf is the virtual address of the buffer into which the message will | ||
1375 | * be written; buflen is the length of the buffer. | ||
1376 | * | ||
1377 | * This routine returns an HV_RcvMsgInfo structure. The msglen member | ||
1378 | * of that structure is the length of the message received, zero if no | ||
1379 | * message is available, or HV_E2BIG if the message is too large for the | ||
1380 | * specified buffer. If the message is too large, it is not consumed, | ||
1381 | * and may be retrieved by a subsequent call to this routine specifying | ||
1382 | * a sufficiently large buffer. A buffer which is HV_MAX_MESSAGE_SIZE | ||
1383 | * bytes long is guaranteed to be able to receive any possible message. | ||
1384 | * | ||
1385 | * The source member of the HV_RcvMsgInfo structure describes the sender | ||
1386 | * of the message. For messages sent by another client tile via an | ||
1387 | * hv_send_message() call, this value is HV_MSG_TILE; for messages sent | ||
1388 | * as a result of a device interrupt, this value is HV_MSG_INTR. | ||
1389 | */ | ||
1390 | |||
1391 | HV_RcvMsgInfo hv_receive_message(HV_MsgState msgstate, HV_VirtAddr buf, | ||
1392 | int buflen); | ||
1393 | |||
1394 | |||
1395 | /** Start remaining tiles owned by this supervisor. Initially, only one tile | ||
1396 | * executes the client program; after it calls this service, the other tiles | ||
1397 | * are started. This allows the initial tile to do one-time configuration | ||
1398 | * of shared data structures without having to lock them against simultaneous | ||
1399 | * access. | ||
1400 | */ | ||
1401 | void hv_start_all_tiles(void); | ||
1402 | |||
1403 | |||
1404 | /** Open a hypervisor device. | ||
1405 | * | ||
1406 | * This service initializes an I/O device and its hypervisor driver software, | ||
1407 | * and makes it available for use. The open operation is per-device per-chip; | ||
1408 | * once it has been performed, the device handle returned may be used in other | ||
1409 | * device services calls made by any tile. | ||
1410 | * | ||
1411 | * @param name Name of the device. A base device name is just a text string | ||
1412 | * (say, "pcie"). If there is more than one instance of a device, the | ||
1413 | * base name is followed by a slash and a device number (say, "pcie/0"). | ||
1414 | * Some devices may support further structure beneath those components; | ||
1415 | * most notably, devices which require control operations do so by | ||
1416 | * supporting reads and/or writes to a control device whose name | ||
1417 | * includes a trailing "/ctl" (say, "pcie/0/ctl"). | ||
1418 | * @param flags Flags (HV_DEV_xxx). | ||
1419 | * @return A positive integer device handle, or a negative error code. | ||
1420 | */ | ||
1421 | int hv_dev_open(HV_VirtAddr name, __hv32 flags); | ||
1422 | |||
1423 | |||
1424 | /** Close a hypervisor device. | ||
1425 | * | ||
1426 | * This service uninitializes an I/O device and its hypervisor driver | ||
1427 | * software, and makes it unavailable for use. The close operation is | ||
1428 | * per-device per-chip; once it has been performed, the device is no longer | ||
1429 | * available. Normally there is no need to ever call the close service. | ||
1430 | * | ||
1431 | * @param devhdl Device handle of the device to be closed. | ||
1432 | * @return Zero if the close is successful, otherwise, a negative error code. | ||
1433 | */ | ||
1434 | int hv_dev_close(int devhdl); | ||
1435 | |||
1436 | |||
1437 | /** Read data from a hypervisor device synchronously. | ||
1438 | * | ||
1439 | * This service transfers data from a hypervisor device to a memory buffer. | ||
1440 | * When the service returns, the data has been written from the memory buffer, | ||
1441 | * and the buffer will not be further modified by the driver. | ||
1442 | * | ||
1443 | * No ordering is guaranteed between requests issued from different tiles. | ||
1444 | * | ||
1445 | * Devices may choose to support both the synchronous and asynchronous read | ||
1446 | * operations, only one of them, or neither of them. | ||
1447 | * | ||
1448 | * @param devhdl Device handle of the device to be read from. | ||
1449 | * @param flags Flags (HV_DEV_xxx). | ||
1450 | * @param va Virtual address of the target data buffer. This buffer must | ||
1451 | * be mapped in the currently installed page table; if not, HV_EFAULT | ||
1452 | * may be returned. | ||
1453 | * @param len Number of bytes to be transferred. | ||
1454 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1455 | * often a byte offset from the beginning of the device; in other cases, | ||
1456 | * like on a control device, it may have a different meaning. | ||
1457 | * @return A non-negative value if the read was at least partially successful; | ||
1458 | * otherwise, a negative error code. The precise interpretation of | ||
1459 | * the return value is driver-dependent, but many drivers will return | ||
1460 | * the number of bytes successfully transferred. | ||
1461 | */ | ||
1462 | int hv_dev_pread(int devhdl, __hv32 flags, HV_VirtAddr va, __hv32 len, | ||
1463 | __hv64 offset); | ||
1464 | |||
1465 | #define HV_DEV_NB_EMPTY 0x1 /**< Don't block when no bytes of data can | ||
1466 | be transferred. */ | ||
1467 | #define HV_DEV_NB_PARTIAL 0x2 /**< Don't block when some bytes, but not all | ||
1468 | of the requested bytes, can be | ||
1469 | transferred. */ | ||
1470 | #define HV_DEV_NOCACHE 0x4 /**< The caller warrants that none of the | ||
1471 | cache lines which might contain data | ||
1472 | from the requested buffer are valid. | ||
1473 | Useful with asynchronous operations | ||
1474 | only. */ | ||
1475 | |||
1476 | #define HV_DEV_ALLFLAGS (HV_DEV_NB_EMPTY | HV_DEV_NB_PARTIAL | \ | ||
1477 | HV_DEV_NOCACHE) /**< All HV_DEV_xxx flags */ | ||
1478 | |||
1479 | /** Write data to a hypervisor device synchronously. | ||
1480 | * | ||
1481 | * This service transfers data from a memory buffer to a hypervisor device. | ||
1482 | * When the service returns, the data has been read from the memory buffer, | ||
1483 | * and the buffer may be overwritten by the client; the data may not | ||
1484 | * necessarily have been conveyed to the actual hardware I/O interface. | ||
1485 | * | ||
1486 | * No ordering is guaranteed between requests issued from different tiles. | ||
1487 | * | ||
1488 | * Devices may choose to support both the synchronous and asynchronous write | ||
1489 | * operations, only one of them, or neither of them. | ||
1490 | * | ||
1491 | * @param devhdl Device handle of the device to be written to. | ||
1492 | * @param flags Flags (HV_DEV_xxx). | ||
1493 | * @param va Virtual address of the source data buffer. This buffer must | ||
1494 | * be mapped in the currently installed page table; if not, HV_EFAULT | ||
1495 | * may be returned. | ||
1496 | * @param len Number of bytes to be transferred. | ||
1497 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1498 | * often a byte offset from the beginning of the device; in other cases, | ||
1499 | * like on a control device, it may have a different meaning. | ||
1500 | * @return A non-negative value if the write was at least partially successful; | ||
1501 | * otherwise, a negative error code. The precise interpretation of | ||
1502 | * the return value is driver-dependent, but many drivers will return | ||
1503 | * the number of bytes successfully transferred. | ||
1504 | */ | ||
1505 | int hv_dev_pwrite(int devhdl, __hv32 flags, HV_VirtAddr va, __hv32 len, | ||
1506 | __hv64 offset); | ||
1507 | |||
1508 | |||
1509 | /** Interrupt arguments, used in the asynchronous I/O interfaces. */ | ||
1510 | #if CHIP_VA_WIDTH() > 32 | ||
1511 | typedef __hv64 HV_IntArg; | ||
1512 | #else | ||
1513 | typedef __hv32 HV_IntArg; | ||
1514 | #endif | ||
1515 | |||
1516 | /** Interrupt messages are delivered via the mechanism as normal messages, | ||
1517 | * but have a message source of HV_MSG_INTR. The message is formatted | ||
1518 | * as an HV_IntrMsg structure. | ||
1519 | */ | ||
1520 | |||
1521 | typedef struct | ||
1522 | { | ||
1523 | HV_IntArg intarg; /**< Interrupt argument, passed to the poll/preada/pwritea | ||
1524 | services */ | ||
1525 | HV_IntArg intdata; /**< Interrupt-specific data (e.g. HV_DEVPOLL_xxx mask) */ | ||
1526 | } HV_IntrMsg; | ||
1527 | |||
1528 | /** Request an interrupt message when a device condition is satisfied. | ||
1529 | * | ||
1530 | * This service requests that an interrupt message be delivered to the | ||
1531 | * requesting tile when a device becomes readable or writable, or when any | ||
1532 | * data queued to the device via previous write operations from this tile | ||
1533 | * has been actually sent out on the hardware I/O interface. Devices may | ||
1534 | * choose to support any, all, or none of the available conditions. | ||
1535 | * | ||
1536 | * If multiple conditions are specified, only one message will be | ||
1537 | * delivered. If the event mask delivered to that interrupt handler | ||
1538 | * indicates that some of the conditions have not yet occurred, the | ||
1539 | * client must issue another poll() call if it wishes to wait for those | ||
1540 | * conditions. | ||
1541 | * | ||
1542 | * Only one poll may be outstanding per device handle per tile. If more than | ||
1543 | * one tile is polling on the same device and condition, they will all be | ||
1544 | * notified when it happens. Because of this, clients may not assume that | ||
1545 | * the condition signaled is necessarily still true when they request a | ||
1546 | * subsequent service; for instance, the readable data which caused the | ||
1547 | * poll call to interrupt may have been read by another tile in the interim. | ||
1548 | * | ||
1549 | * The notification interrupt message could come directly, or via the | ||
1550 | * downcall (intctrl1) method, depending on what the tile is doing | ||
1551 | * when the condition is satisfied. Note that it is possible for the | ||
1552 | * requested interrupt to be delivered after this service is called but | ||
1553 | * before it returns. | ||
1554 | * | ||
1555 | * @param devhdl Device handle of the device to be polled. | ||
1556 | * @param events Flags denoting the events which will cause the interrupt to | ||
1557 | * be delivered (HV_DEVPOLL_xxx). | ||
1558 | * @param intarg Value which will be delivered as the intarg member of the | ||
1559 | * eventual interrupt message; the intdata member will be set to a | ||
1560 | * mask of HV_DEVPOLL_xxx values indicating which conditions have been | ||
1561 | * satisfied. | ||
1562 | * @return Zero if the interrupt was successfully scheduled; otherwise, a | ||
1563 | * negative error code. | ||
1564 | */ | ||
1565 | int hv_dev_poll(int devhdl, __hv32 events, HV_IntArg intarg); | ||
1566 | |||
1567 | #define HV_DEVPOLL_READ 0x1 /**< Test device for readability */ | ||
1568 | #define HV_DEVPOLL_WRITE 0x2 /**< Test device for writability */ | ||
1569 | #define HV_DEVPOLL_FLUSH 0x4 /**< Test device for output drained */ | ||
1570 | |||
1571 | |||
1572 | /** Cancel a request for an interrupt when a device event occurs. | ||
1573 | * | ||
1574 | * This service requests that no interrupt be delivered when the events | ||
1575 | * noted in the last-issued poll() call happen. Once this service returns, | ||
1576 | * the interrupt has been canceled; however, it is possible for the interrupt | ||
1577 | * to be delivered after this service is called but before it returns. | ||
1578 | * | ||
1579 | * @param devhdl Device handle of the device on which to cancel polling. | ||
1580 | * @return Zero if the poll was successfully canceled; otherwise, a negative | ||
1581 | * error code. | ||
1582 | */ | ||
1583 | int hv_dev_poll_cancel(int devhdl); | ||
1584 | |||
1585 | |||
1586 | /** Scatter-gather list entry for hv_dev_preada()/hv_dev_pwritea() calls. */ | ||
1587 | typedef struct | ||
1588 | #if CHIP_VA_WIDTH() <= 32 | ||
1589 | __attribute__ ((packed, aligned(4))) | ||
1590 | #endif | ||
1591 | { | ||
1592 | HV_PhysAddr pa; /**< Client physical address of the buffer segment. */ | ||
1593 | HV_PTE pte; /**< Page table entry describing the caching and location | ||
1594 | override characteristics of the buffer segment. Some | ||
1595 | drivers ignore this element and will require that | ||
1596 | the NOCACHE flag be set on their requests. */ | ||
1597 | __hv32 len; /**< Length of the buffer segment, in bytes. */ | ||
1598 | } HV_SGL; | ||
1599 | |||
1600 | #define HV_SGL_MAXLEN 16 /**< Maximum number of entries in a scatter-gather | ||
1601 | list */ | ||
1602 | |||
1603 | /** Read data from a hypervisor device asynchronously. | ||
1604 | * | ||
1605 | * This service transfers data from a hypervisor device to a memory buffer. | ||
1606 | * When the service returns, the read has been scheduled. When the read | ||
1607 | * completes, an interrupt message will be delivered, and the buffer will | ||
1608 | * not be further modified by the driver. | ||
1609 | * | ||
1610 | * The number of possible outstanding asynchronous requests is defined by | ||
1611 | * each driver, but it is recommended that it be at least two requests | ||
1612 | * per tile per device. | ||
1613 | * | ||
1614 | * No ordering is guaranteed between synchronous and asynchronous requests, | ||
1615 | * even those issued on the same tile. | ||
1616 | * | ||
1617 | * The completion interrupt message could come directly, or via the downcall | ||
1618 | * (intctrl1) method, depending on what the tile is doing when the read | ||
1619 | * completes. Interrupts do not coalesce; one is delivered for each | ||
1620 | * asynchronous I/O request. Note that it is possible for the requested | ||
1621 | * interrupt to be delivered after this service is called but before it | ||
1622 | * returns. | ||
1623 | * | ||
1624 | * Devices may choose to support both the synchronous and asynchronous read | ||
1625 | * operations, only one of them, or neither of them. | ||
1626 | * | ||
1627 | * @param devhdl Device handle of the device to be read from. | ||
1628 | * @param flags Flags (HV_DEV_xxx). | ||
1629 | * @param sgl_len Number of elements in the scatter-gather list. | ||
1630 | * @param sgl Scatter-gather list describing the memory to which data will be | ||
1631 | * written. | ||
1632 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1633 | * often a byte offset from the beginning of the device; in other cases, | ||
1634 | * like on a control device, it may have a different meaning. | ||
1635 | * @param intarg Value which will be delivered as the intarg member of the | ||
1636 | * eventual interrupt message; the intdata member will be set to the | ||
1637 | * normal return value from the read request. | ||
1638 | * @return Zero if the read was successfully scheduled; otherwise, a negative | ||
1639 | * error code. Note that some drivers may choose to pre-validate | ||
1640 | * their arguments, and may thus detect certain device error | ||
1641 | * conditions at this time rather than when the completion notification | ||
1642 | * occurs, but this is not required. | ||
1643 | */ | ||
1644 | int hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
1645 | HV_SGL sgl[/* sgl_len */], __hv64 offset, HV_IntArg intarg); | ||
1646 | |||
1647 | |||
1648 | /** Write data to a hypervisor device asynchronously. | ||
1649 | * | ||
1650 | * This service transfers data from a memory buffer to a hypervisor | ||
1651 | * device. When the service returns, the write has been scheduled. | ||
1652 | * When the write completes, an interrupt message will be delivered, | ||
1653 | * and the buffer may be overwritten by the client; the data may not | ||
1654 | * necessarily have been conveyed to the actual hardware I/O interface. | ||
1655 | * | ||
1656 | * The number of possible outstanding asynchronous requests is defined by | ||
1657 | * each driver, but it is recommended that it be at least two requests | ||
1658 | * per tile per device. | ||
1659 | * | ||
1660 | * No ordering is guaranteed between synchronous and asynchronous requests, | ||
1661 | * even those issued on the same tile. | ||
1662 | * | ||
1663 | * The completion interrupt message could come directly, or via the downcall | ||
1664 | * (intctrl1) method, depending on what the tile is doing when the read | ||
1665 | * completes. Interrupts do not coalesce; one is delivered for each | ||
1666 | * asynchronous I/O request. Note that it is possible for the requested | ||
1667 | * interrupt to be delivered after this service is called but before it | ||
1668 | * returns. | ||
1669 | * | ||
1670 | * Devices may choose to support both the synchronous and asynchronous write | ||
1671 | * operations, only one of them, or neither of them. | ||
1672 | * | ||
1673 | * @param devhdl Device handle of the device to be read from. | ||
1674 | * @param flags Flags (HV_DEV_xxx). | ||
1675 | * @param sgl_len Number of elements in the scatter-gather list. | ||
1676 | * @param sgl Scatter-gather list describing the memory from which data will be | ||
1677 | * read. | ||
1678 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1679 | * often a byte offset from the beginning of the device; in other cases, | ||
1680 | * like on a control device, it may have a different meaning. | ||
1681 | * @param intarg Value which will be delivered as the intarg member of the | ||
1682 | * eventual interrupt message; the intdata member will be set to the | ||
1683 | * normal return value from the write request. | ||
1684 | * @return Zero if the write was successfully scheduled; otherwise, a negative | ||
1685 | * error code. Note that some drivers may choose to pre-validate | ||
1686 | * their arguments, and may thus detect certain device error | ||
1687 | * conditions at this time rather than when the completion notification | ||
1688 | * occurs, but this is not required. | ||
1689 | */ | ||
1690 | int hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
1691 | HV_SGL sgl[/* sgl_len */], __hv64 offset, HV_IntArg intarg); | ||
1692 | |||
1693 | |||
1694 | /** A (tile, ASID) pair identifying a user process context; see | ||
1695 | typedef struct | ||
1696 | { | ||
1697 | /** X coordinate, relative to supervisor's top-left coordinate */ | ||
1698 | unsigned int x:11; | ||
1699 | |||
1700 | /** Y coordinate, relative to supervisor's top-left coordinate */ | ||
1701 | unsigned int y:11; | ||
1702 | |||
1703 | /** ASID of the process on this x,y tile */ | ||
1704 | HV_ASID asid:10; | ||
1705 | } HV_Remote_ASID; | ||
1706 | |||
1707 | /** Flush cache and/or TLB state on remote tiles. | ||
1708 | * | ||
1709 | * @param cache_pa Client physical address to flush from cache (ignored if | ||
1710 | * the length encoded in cache_control is zero, or if | ||
1711 | * HV_FLUSH_EVICT_L2 is set, or if cache_cpumask is NULL). | ||
1712 | * @param cache_control This argument allows you to specify a length of | ||
1713 | * physical address space to flush (maximum HV_FLUSH_MAX_CACHE_LEN). | ||
1714 | * You can "or" in HV_FLUSH_EVICT_L2 to flush the whole L2 cache. | ||
1715 | * You can "or" in HV_FLUSH_EVICT_L1I to flush the whole L1I cache. | ||
1716 | * HV_FLUSH_ALL flushes all caches. | ||
1717 | * @param cache_cpumask Bitmask (in row-major order, supervisor-relative) of | ||
1718 | * tile indices to perform cache flush on. The low bit of the first | ||
1719 | * word corresponds to the tile at the upper left-hand corner of the | ||
1720 | * supervisor's rectangle. If passed as a NULL pointer, equivalent | ||
1721 | * to an empty bitmask. On chips which support hash-for-home caching, | ||
1722 | * if passed as -1, equivalent to a mask containing tiles which could | ||
1723 | * be doing hash-for-home caching. | ||
1724 | * @param tlb_va Virtual address to flush from TLB (ignored if | ||
1725 | * tlb_length is zero or tlb_cpumask is NULL). | ||
1726 | * @param tlb_length Number of bytes of data to flush from the TLB. | ||
1727 | * @param tlb_pgsize Page size to use for TLB flushes. | ||
1728 | * tlb_va and tlb_length need not be aligned to this size. | ||
1729 | * @param tlb_cpumask Bitmask for tlb flush, like cache_cpumask. | ||
1730 | * If passed as a NULL pointer, equivalent to an empty bitmask. | ||
1731 | * @param asids Pointer to an HV_Remote_ASID array of tile/ASID pairs to flush. | ||
1732 | * @param asidcount Number of HV_Remote_ASID entries in asids[]. | ||
1733 | * @return Zero for success, or else HV_EINVAL or HV_EFAULT for errors that | ||
1734 | * are detected while parsing the arguments. | ||
1735 | */ | ||
1736 | int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control, | ||
1737 | unsigned long* cache_cpumask, | ||
1738 | HV_VirtAddr tlb_va, unsigned long tlb_length, | ||
1739 | unsigned long tlb_pgsize, unsigned long* tlb_cpumask, | ||
1740 | HV_Remote_ASID* asids, int asidcount); | ||
1741 | |||
/** Include in cache_control to ensure a flush of the entire L2. */
#define HV_FLUSH_EVICT_L2 (1UL << 31)

/** Include in cache_control to ensure a flush of the entire L1I. */
#define HV_FLUSH_EVICT_L1I (1UL << 30)

/** Maximum legal size to use for the "length" component of cache_control. */
#define HV_FLUSH_MAX_CACHE_LEN ((1UL << 30) - 1)

/** Use for cache_control to ensure a flush of all caches. */
#define HV_FLUSH_ALL -1UL

#else   /* __ASSEMBLER__ */

/* Assembler twins of the constants above: the assembler cannot parse the
 * "UL" suffix, so these are plain integer literals with identical values.
 */

/** Include in cache_control to ensure a flush of the entire L2. */
#define HV_FLUSH_EVICT_L2 (1 << 31)

/** Include in cache_control to ensure a flush of the entire L1I. */
#define HV_FLUSH_EVICT_L1I (1 << 30)

/** Maximum legal size to use for the "length" component of cache_control. */
#define HV_FLUSH_MAX_CACHE_LEN ((1 << 30) - 1)

/** Use for cache_control to ensure a flush of all caches. */
#define HV_FLUSH_ALL -1

#endif  /* __ASSEMBLER__ */

#ifndef __ASSEMBLER__

/** Return a 64-bit value corresponding to the PTE if needed */
#define hv_pte_val(pte) ((pte).val)

/** Cast a 64-bit value to an HV_PTE */
#define hv_pte(val) ((HV_PTE) { val })

#endif  /* !__ASSEMBLER__ */


/** Bits in the size of an HV_PTE */
#define HV_LOG2_PTE_SIZE 3

/** Size of an HV_PTE */
#define HV_PTE_SIZE (1 << HV_LOG2_PTE_SIZE)
1786 | |||
1787 | |||
/* Bits in HV_PTE's low word. */
#define HV_PTE_INDEX_PRESENT          0  /**< PTE is valid */
#define HV_PTE_INDEX_MIGRATING        1  /**< Page is migrating */
#define HV_PTE_INDEX_CLIENT0          2  /**< Page client state 0 */
#define HV_PTE_INDEX_CLIENT1          3  /**< Page client state 1 */
#define HV_PTE_INDEX_NC               4  /**< L1$/L2$ incoherent with L3$ */
#define HV_PTE_INDEX_NO_ALLOC_L1      5  /**< Page is uncached in local L1$ */
#define HV_PTE_INDEX_NO_ALLOC_L2      6  /**< Page is uncached in local L2$ */
#define HV_PTE_INDEX_CACHED_PRIORITY  7  /**< Page is priority cached */
#define HV_PTE_INDEX_PAGE             8  /**< PTE describes a page */
#define HV_PTE_INDEX_GLOBAL           9  /**< Page is global */
#define HV_PTE_INDEX_USER            10  /**< Page is user-accessible */
#define HV_PTE_INDEX_ACCESSED        11  /**< Page has been accessed */
#define HV_PTE_INDEX_DIRTY           12  /**< Page has been written */
                                         /* Bits 13-15 are reserved for
                                            future use. */
#define HV_PTE_INDEX_MODE            16  /**< Page mode; see HV_PTE_MODE_xxx */
#define HV_PTE_MODE_BITS              3  /**< Number of bits in mode */
                                         /* Bit 19 is reserved for
                                            future use. */
#define HV_PTE_INDEX_LOTAR           20  /**< Page's LOTAR; must be high bits
                                              of word */
#define HV_PTE_LOTAR_BITS            12  /**< Number of bits in a LOTAR */

/* Bits in HV_PTE's high word. */
#define HV_PTE_INDEX_READABLE        32  /**< Page is readable */
#define HV_PTE_INDEX_WRITABLE        33  /**< Page is writable */
#define HV_PTE_INDEX_EXECUTABLE      34  /**< Page is executable */
#define HV_PTE_INDEX_PTFN            35  /**< Page's PTFN; must be high bits
                                              of word */
#define HV_PTE_PTFN_BITS             29  /**< Number of bits in a PTFN */

/** Position of the PFN field within the PTE (subset of the PTFN). */
#define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \
                                               HV_LOG2_PAGE_TABLE_ALIGN))

/** Length of the PFN field within the PTE (subset of the PTFN).
 *
 * NOTE(review): this previously referenced HV_PTE_INDEX_PTFN_BITS, a name
 * that is not defined anywhere; the width macro defined above is
 * HV_PTE_PTFN_BITS.  Fixed to use the defined name (same intended value).
 */
#define HV_PTE_INDEX_PFN_BITS (HV_PTE_PTFN_BITS - \
                               (HV_LOG2_PAGE_SIZE_SMALL - \
                                HV_LOG2_PAGE_TABLE_ALIGN))
1828 | |||
/*
 * Legal values for the PTE's mode field.  The mode selects where data for
 * the page may reside and how misses are serviced.
 */

/** Data is not resident in any caches; loads and stores access memory
 *  directly.
 */
#define HV_PTE_MODE_UNCACHED 1

/** Data is resident in the tile's local L1 and/or L2 caches; if a load
 *  or store misses there, it goes to memory.
 *
 *  The copy in the local L1$/L2$ is not invalidated when the copy in
 *  memory is changed.
 */
#define HV_PTE_MODE_CACHE_NO_L3 2

/** Data is resident in the tile's local L1 and/or L2 caches.  If a load
 *  or store misses there, it goes to an L3 cache in a designated tile;
 *  if it misses there, it goes to memory.
 *
 *  If the NC bit is not set, the copy in the local L1$/L2$ is invalidated
 *  when the copy in the remote L3$ is changed.  Otherwise, such
 *  invalidation will not occur.
 *
 *  Chips for which CHIP_HAS_COHERENT_LOCAL_CACHE() is 0 do not support
 *  invalidation from an L3$ to another tile's L1$/L2$.  If the NC bit is
 *  clear on such a chip, no copy is kept in the local L1$/L2$ in this mode.
 */
#define HV_PTE_MODE_CACHE_TILE_L3 3

/** Data is resident in the tile's local L1 and/or L2 caches.  If a load
 *  or store misses there, it goes to an L3 cache in one of a set of
 *  designated tiles; if it misses there, it goes to memory.  Which tile
 *  is chosen from the set depends upon a hash function applied to the
 *  physical address.  This mode is not supported on chips for which
 *  CHIP_HAS_CBOX_HOME_MAP() is 0.
 *
 *  If the NC bit is not set, the copy in the local L1$/L2$ is invalidated
 *  when the copy in the remote L3$ is changed.  Otherwise, such
 *  invalidation will not occur.
 *
 *  Chips for which CHIP_HAS_COHERENT_LOCAL_CACHE() is 0 do not support
 *  invalidation from an L3$ to another tile's L1$/L2$.  If the NC bit is
 *  clear on such a chip, no copy is kept in the local L1$/L2$ in this mode.
 */
#define HV_PTE_MODE_CACHE_HASH_L3 4

/** Data is not resident in memory; accesses are instead made to an I/O
 *  device, whose tile coordinates are given by the PTE's LOTAR field.
 *  This mode is only supported on chips for which CHIP_HAS_MMIO() is 1.
 *  The EXECUTABLE bit may not be set in an MMIO PTE.
 */
#define HV_PTE_MODE_MMIO 5
1882 | |||
1883 | |||
/* C wants 1ULL so it is typed as __hv64, but the assembler needs just numbers.
 * The assembler can't handle shifts greater than 31, but treats them
 * as shifts mod 32, so assembler code must be aware of which word
 * the bit belongs in when using these macros.
 */
#ifdef __ASSEMBLER__
#define __HV_PTE_ONE 1        /**< One, for assembler */
#else
#define __HV_PTE_ONE 1ULL     /**< One, for C */
#endif
1894 | |||
1895 | /** Is this PTE present? | ||
1896 | * | ||
1897 | * If this bit is set, this PTE represents a valid translation or level-2 | ||
1898 | * page table pointer. Otherwise, the page table does not contain a | ||
1899 | * translation for the subject virtual pages. | ||
1900 | * | ||
1901 | * If this bit is not set, the other bits in the PTE are not | ||
1902 | * interpreted by the hypervisor, and may contain any value. | ||
1903 | */ | ||
1904 | #define HV_PTE_PRESENT (__HV_PTE_ONE << HV_PTE_INDEX_PRESENT) | ||
1905 | |||
1906 | /** Does this PTE map a page? | ||
1907 | * | ||
1908 | * If this bit is set in the level-1 page table, the entry should be | ||
1909 | * interpreted as a level-2 page table entry mapping a large page. | ||
1910 | * | ||
1911 | * This bit should not be modified by the client while PRESENT is set, as | ||
1912 | * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits. | ||
1913 | * | ||
1914 | * In a level-2 page table, this bit is ignored and must be zero. | ||
1915 | */ | ||
1916 | #define HV_PTE_PAGE (__HV_PTE_ONE << HV_PTE_INDEX_PAGE) | ||
1917 | |||
1918 | /** Is this a global (non-ASID) mapping? | ||
1919 | * | ||
1920 | * If this bit is set, the translations established by this PTE will | ||
1921 | * not be flushed from the TLB by the hv_flush_asid() service; they | ||
1922 | * will be flushed by the hv_flush_page() or hv_flush_pages() services. | ||
1923 | * | ||
1924 | * Setting this bit for translations which are identical in all page | ||
1925 | * tables (for instance, code and data belonging to a client OS) can | ||
1926 | * be very beneficial, as it will reduce the number of TLB misses. | ||
1927 | * Note that, while it is not an error which will be detected by the | ||
1928 | * hypervisor, it is an extremely bad idea to set this bit for | ||
1929 | * translations which are _not_ identical in all page tables. | ||
1930 | * | ||
1931 | * This bit should not be modified by the client while PRESENT is set, as | ||
1932 | * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits. | ||
1933 | * | ||
1934 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1935 | */ | ||
1936 | #define HV_PTE_GLOBAL (__HV_PTE_ONE << HV_PTE_INDEX_GLOBAL) | ||
1937 | |||
1938 | /** Is this mapping accessible to users? | ||
1939 | * | ||
1940 | * If this bit is set, code running at any PL will be permitted to | ||
1941 | * access the virtual addresses mapped by this PTE. Otherwise, only | ||
1942 | * code running at PL 1 or above will be allowed to do so. | ||
1943 | * | ||
1944 | * This bit should not be modified by the client while PRESENT is set, as | ||
1945 | * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits. | ||
1946 | * | ||
1947 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1948 | */ | ||
1949 | #define HV_PTE_USER (__HV_PTE_ONE << HV_PTE_INDEX_USER) | ||
1950 | |||
1951 | /** Has this mapping been accessed? | ||
1952 | * | ||
1953 | * This bit is set by the hypervisor when the memory described by the | ||
1954 | * translation is accessed for the first time. It is never cleared by | ||
1955 | * the hypervisor, but may be cleared by the client. After the bit | ||
1956 | * has been cleared, subsequent references are not guaranteed to set | ||
1957 | * it again until the translation has been flushed from the TLB. | ||
1958 | * | ||
1959 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1960 | */ | ||
1961 | #define HV_PTE_ACCESSED (__HV_PTE_ONE << HV_PTE_INDEX_ACCESSED) | ||
1962 | |||
1963 | /** Is this mapping dirty? | ||
1964 | * | ||
1965 | * This bit is set by the hypervisor when the memory described by the | ||
1966 | * translation is written for the first time. It is never cleared by | ||
1967 | * the hypervisor, but may be cleared by the client. After the bit | ||
1968 | * has been cleared, subsequent references are not guaranteed to set | ||
1969 | * it again until the translation has been flushed from the TLB. | ||
1970 | * | ||
1971 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1972 | */ | ||
1973 | #define HV_PTE_DIRTY (__HV_PTE_ONE << HV_PTE_INDEX_DIRTY) | ||
1974 | |||
1975 | /** Migrating bit in PTE. | ||
1976 | * | ||
1977 | * This bit is guaranteed not to be inspected or modified by the | ||
1978 | * hypervisor. The name is indicative of the suggested use by the client | ||
1979 | * to tag pages whose L3 cache is being migrated from one cpu to another. | ||
1980 | */ | ||
1981 | #define HV_PTE_MIGRATING (__HV_PTE_ONE << HV_PTE_INDEX_MIGRATING) | ||
1982 | |||
1983 | /** Client-private bit in PTE. | ||
1984 | * | ||
1985 | * This bit is guaranteed not to be inspected or modified by the | ||
1986 | * hypervisor. | ||
1987 | */ | ||
1988 | #define HV_PTE_CLIENT0 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT0) | ||
1989 | |||
1990 | /** Client-private bit in PTE. | ||
1991 | * | ||
1992 | * This bit is guaranteed not to be inspected or modified by the | ||
1993 | * hypervisor. | ||
1994 | */ | ||
1995 | #define HV_PTE_CLIENT1 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1) | ||
1996 | |||
1997 | /** Non-coherent (NC) bit in PTE. | ||
1998 | * | ||
1999 | * If this bit is set, the mapping that is set up will be non-coherent | ||
2000 | * (also known as non-inclusive). This means that changes to the L3 | ||
2001 | * cache will not cause a local copy to be invalidated. It is generally | ||
2002 | * recommended only for read-only mappings. | ||
2003 | * | ||
2004 | * In level-1 PTEs, if the Page bit is clear, this bit determines how the | ||
2005 | * level-2 page table is accessed. | ||
2006 | */ | ||
2007 | #define HV_PTE_NC (__HV_PTE_ONE << HV_PTE_INDEX_NC) | ||
2008 | |||
2009 | /** Is this page prevented from filling the L1$? | ||
2010 | * | ||
 * If this bit is set, the page described by the PTE will not be cached
 * in the local cpu's L1 cache.
2013 | * | ||
2014 | * If CHIP_HAS_NC_AND_NOALLOC_BITS() is not true in <chip.h> for this chip, | ||
2015 | * it is illegal to use this attribute, and may cause client termination. | ||
2016 | * | ||
2017 | * In level-1 PTEs, if the Page bit is clear, this bit | ||
2018 | * determines how the level-2 page table is accessed. | ||
2019 | */ | ||
2020 | #define HV_PTE_NO_ALLOC_L1 (__HV_PTE_ONE << HV_PTE_INDEX_NO_ALLOC_L1) | ||
2021 | |||
2022 | /** Is this page prevented from filling the L2$? | ||
2023 | * | ||
 * If this bit is set, the page described by the PTE will not be cached
 * in the local cpu's L2 cache.
2026 | * | ||
2027 | * If CHIP_HAS_NC_AND_NOALLOC_BITS() is not true in <chip.h> for this chip, | ||
2028 | * it is illegal to use this attribute, and may cause client termination. | ||
2029 | * | ||
2030 | * In level-1 PTEs, if the Page bit is clear, this bit determines how the | ||
2031 | * level-2 page table is accessed. | ||
2032 | */ | ||
2033 | #define HV_PTE_NO_ALLOC_L2 (__HV_PTE_ONE << HV_PTE_INDEX_NO_ALLOC_L2) | ||
2034 | |||
2035 | /** Is this a priority page? | ||
2036 | * | ||
2037 | * If this bit is set, the page described by the PTE will be given | ||
2038 | * priority in the cache. Normally this translates into allowing the | ||
2039 | * page to use only the "red" half of the cache. The client may wish to | ||
2040 | * then use the hv_set_caching service to specify that other pages which | ||
2041 | * alias this page will use only the "black" half of the cache. | ||
2042 | * | ||
2043 | * If the Cached Priority bit is clear, the hypervisor uses the | ||
2044 | * current hv_set_caching() value to choose how to cache the page. | ||
2045 | * | ||
2046 | * It is illegal to set the Cached Priority bit if the Non-Cached bit | ||
2047 | * is set and the Cached Remotely bit is clear, i.e. if requests to | ||
2048 | * the page map directly to memory. | ||
2049 | * | ||
2050 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2051 | */ | ||
2052 | #define HV_PTE_CACHED_PRIORITY (__HV_PTE_ONE << \ | ||
2053 | HV_PTE_INDEX_CACHED_PRIORITY) | ||
2054 | |||
2055 | /** Is this a readable mapping? | ||
2056 | * | ||
2057 | * If this bit is set, code will be permitted to read from (e.g., | ||
2058 | * issue load instructions against) the virtual addresses mapped by | ||
2059 | * this PTE. | ||
2060 | * | ||
2061 | * It is illegal for this bit to be clear if the Writable bit is set. | ||
2062 | * | ||
2063 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2064 | */ | ||
2065 | #define HV_PTE_READABLE (__HV_PTE_ONE << HV_PTE_INDEX_READABLE) | ||
2066 | |||
2067 | /** Is this a writable mapping? | ||
2068 | * | ||
2069 | * If this bit is set, code will be permitted to write to (e.g., issue | ||
2070 | * store instructions against) the virtual addresses mapped by this | ||
2071 | * PTE. | ||
2072 | * | ||
2073 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2074 | */ | ||
2075 | #define HV_PTE_WRITABLE (__HV_PTE_ONE << HV_PTE_INDEX_WRITABLE) | ||
2076 | |||
2077 | /** Is this an executable mapping? | ||
2078 | * | ||
2079 | * If this bit is set, code will be permitted to execute from | ||
2080 | * (e.g., jump to) the virtual addresses mapped by this PTE. | ||
2081 | * | ||
2082 | * This bit applies to any processor on the tile, if there are more | ||
2083 | * than one. | ||
2084 | * | ||
2085 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2086 | */ | ||
2087 | #define HV_PTE_EXECUTABLE (__HV_PTE_ONE << HV_PTE_INDEX_EXECUTABLE) | ||
2088 | |||
2089 | /** The width of a LOTAR's x or y bitfield. */ | ||
2090 | #define HV_LOTAR_WIDTH 11 | ||
2091 | |||
2092 | /** Converts an x,y pair to a LOTAR value. */ | ||
2093 | #define HV_XY_TO_LOTAR(x, y) ((HV_LOTAR)(((x) << HV_LOTAR_WIDTH) | (y))) | ||
2094 | |||
2095 | /** Extracts the X component of a lotar. */ | ||
2096 | #define HV_LOTAR_X(lotar) ((lotar) >> HV_LOTAR_WIDTH) | ||
2097 | |||
2098 | /** Extracts the Y component of a lotar. */ | ||
2099 | #define HV_LOTAR_Y(lotar) ((lotar) & ((1 << HV_LOTAR_WIDTH) - 1)) | ||
2100 | |||
2101 | #ifndef __ASSEMBLER__ | ||
2102 | |||
/** Define accessor functions for a single PTE bit.
 *
 * For a given flag name/bit pair, expands to three static inline helpers:
 *   hv_pte_get_NAME(pte)   -- returns the bit as 0 or 1
 *   hv_pte_set_NAME(pte)   -- returns a copy of the PTE with the bit set
 *   hv_pte_clear_NAME(pte) -- returns a copy of the PTE with the bit clear
 *
 * The PTE is passed and returned by value, so the set/clear variants do
 * not modify the caller's PTE in place.
 */
#define _HV_BIT(name, bit)                                      \
static __inline int                                             \
hv_pte_get_##name(HV_PTE pte)                                   \
{                                                               \
        return (pte.val >> HV_PTE_INDEX_##bit) & 1;             \
}                                                               \
                                                                \
static __inline HV_PTE                                          \
hv_pte_set_##name(HV_PTE pte)                                   \
{                                                               \
        pte.val |= 1ULL << HV_PTE_INDEX_##bit;                  \
        return pte;                                             \
}                                                               \
                                                                \
static __inline HV_PTE                                          \
hv_pte_clear_##name(HV_PTE pte)                                 \
{                                                               \
        pte.val &= ~(1ULL << HV_PTE_INDEX_##bit);               \
        return pte;                                             \
}

/* Generate accessors to get, set, and clear various PTE flags.
 */
_HV_BIT(present,         PRESENT)
_HV_BIT(page,            PAGE)
_HV_BIT(client0,         CLIENT0)
_HV_BIT(client1,         CLIENT1)
_HV_BIT(migrating,       MIGRATING)
_HV_BIT(nc,              NC)
_HV_BIT(readable,        READABLE)
_HV_BIT(writable,        WRITABLE)
_HV_BIT(executable,      EXECUTABLE)
_HV_BIT(accessed,        ACCESSED)
_HV_BIT(dirty,           DIRTY)
_HV_BIT(no_alloc_l1,     NO_ALLOC_L1)
_HV_BIT(no_alloc_l2,     NO_ALLOC_L2)
_HV_BIT(cached_priority, CACHED_PRIORITY)
_HV_BIT(global,          GLOBAL)
_HV_BIT(user,            USER)

#undef _HV_BIT
2145 | |||
2146 | /** Get the page mode from the PTE. | ||
2147 | * | ||
2148 | * This field generally determines whether and how accesses to the page | ||
2149 | * are cached; the HV_PTE_MODE_xxx symbols define the legal values for the | ||
2150 | * page mode. The NC, NO_ALLOC_L1, and NO_ALLOC_L2 bits modify this | ||
2151 | * general policy. | ||
2152 | */ | ||
2153 | static __inline unsigned int | ||
2154 | hv_pte_get_mode(const HV_PTE pte) | ||
2155 | { | ||
2156 | return (((__hv32) pte.val) >> HV_PTE_INDEX_MODE) & | ||
2157 | ((1 << HV_PTE_MODE_BITS) - 1); | ||
2158 | } | ||
2159 | |||
2160 | /** Set the page mode into a PTE. See hv_pte_get_mode. */ | ||
2161 | static __inline HV_PTE | ||
2162 | hv_pte_set_mode(HV_PTE pte, unsigned int val) | ||
2163 | { | ||
2164 | pte.val &= ~(((1ULL << HV_PTE_MODE_BITS) - 1) << HV_PTE_INDEX_MODE); | ||
2165 | pte.val |= val << HV_PTE_INDEX_MODE; | ||
2166 | return pte; | ||
2167 | } | ||
2168 | |||
/** Get the page frame number from the PTE.
 *
 * This field contains the upper bits of the CPA (client physical
 * address) of the target page; the complete CPA is this field with
 * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it.
 *
 * For PTEs in a level-1 page table where the Page bit is set, the
 * CPA must be aligned modulo the large page size.
 */
static __inline unsigned int
hv_pte_get_pfn(const HV_PTE pte)
{
        /* The narrowing conversion to unsigned int discards any bits
         * above the PFN field; the shift leaves the PFN in the low bits. */
        return pte.val >> HV_PTE_INDEX_PFN;
}
2183 | |||
2184 | |||
/** Set the page frame number into a PTE.  See hv_pte_get_pfn.
 *
 * Any previous PFN (and the low PTFN alignment bits below it) is cleared
 * before the new value is installed.
 */
static __inline HV_PTE
hv_pte_set_pfn(HV_PTE pte, unsigned int val)
{
        /*
         * Note that the use of "PTFN" in the next line is intentional; we
         * don't want any garbage lower bits left in that field.
         */
        pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN);
        pte.val |= (__hv64) val << HV_PTE_INDEX_PFN;
        return pte;
}
2197 | |||
/** Get the page table frame number from the PTE.
 *
 * This field contains the upper bits of the CPA (client physical
 * address) of the target page table; the complete CPA is this field
 * with HV_PAGE_TABLE_ALIGN zero bits appended to it.
 *
 * For PTEs in a level-1 page table when the Page bit is not set, the
 * CPA must be aligned modulo the stricter of HV_PAGE_TABLE_ALIGN and
 * the level-2 page table size.
 */
static __inline unsigned long
hv_pte_get_ptfn(const HV_PTE pte)
{
        return pte.val >> HV_PTE_INDEX_PTFN;
}
2213 | |||
2214 | |||
2215 | /** Set the page table frame number into a PTE. See hv_pte_get_ptfn. */ | ||
2216 | static __inline HV_PTE | ||
2217 | hv_pte_set_ptfn(HV_PTE pte, unsigned long val) | ||
2218 | { | ||
2219 | pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS)-1) << HV_PTE_INDEX_PTFN); | ||
2220 | pte.val |= (__hv64) val << HV_PTE_INDEX_PTFN; | ||
2221 | return pte; | ||
2222 | } | ||
2223 | |||
2224 | |||
2225 | /** Get the remote tile caching this page. | ||
2226 | * | ||
2227 | * Specifies the remote tile which is providing the L3 cache for this page. | ||
2228 | * | ||
2229 | * This field is ignored unless the page mode is HV_PTE_MODE_CACHE_TILE_L3. | ||
2230 | * | ||
2231 | * In level-1 PTEs, if the Page bit is clear, this field determines how the | ||
2232 | * level-2 page table is accessed. | ||
2233 | */ | ||
2234 | static __inline unsigned int | ||
2235 | hv_pte_get_lotar(const HV_PTE pte) | ||
2236 | { | ||
2237 | unsigned int lotar = ((__hv32) pte.val) >> HV_PTE_INDEX_LOTAR; | ||
2238 | |||
2239 | return HV_XY_TO_LOTAR( (lotar >> (HV_PTE_LOTAR_BITS / 2)), | ||
2240 | (lotar & ((1 << (HV_PTE_LOTAR_BITS / 2)) - 1)) ); | ||
2241 | } | ||
2242 | |||
2243 | |||
/** Set the remote tile caching a page into a PTE.  See hv_pte_get_lotar. */
static __inline HV_PTE
hv_pte_set_lotar(HV_PTE pte, unsigned int val)
{
        /* Split the lotar into its X and Y coordinate components. */
        unsigned int x = HV_LOTAR_X(val);
        unsigned int y = HV_LOTAR_Y(val);

        /* Clear the old field, then repack X above Y within the field. */
        pte.val &= ~(((1ULL << HV_PTE_LOTAR_BITS)-1) << HV_PTE_INDEX_LOTAR);
        pte.val |= (x << (HV_PTE_INDEX_LOTAR + HV_PTE_LOTAR_BITS / 2)) |
                (y << HV_PTE_INDEX_LOTAR);
        return pte;
}
2256 | |||
2257 | #endif /* !__ASSEMBLER__ */ | ||
2258 | |||
/** Converts a client physical address to a pfn. */
#define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL)

/** Converts a pfn to a client physical address. */
#define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL)

/** Converts a client physical address to a ptfn. */
#define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN)

/** Converts a ptfn to a client physical address. */
#define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)

/** Converts a ptfn to a pfn. */
#define HV_PTFN_TO_PFN(p) \
  ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))

/** Converts a pfn to a ptfn. */
#define HV_PFN_TO_PTFN(p) \
  ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
2278 | |||
#if CHIP_VA_WIDTH() > 32

/** Log number of HV_PTE entries in L0 page table */
#define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN)

/** Number of HV_PTE entries in L0 page table */
#define HV_L0_ENTRIES (1 << HV_LOG2_L0_ENTRIES)

/** Log size of L0 page table in bytes */
#define HV_LOG2_L0_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L0_ENTRIES)

/** Size of L0 page table in bytes */
#define HV_L0_SIZE (1 << HV_LOG2_L0_SIZE)

#ifdef __ASSEMBLER__

/** Index in L0 for a specific VA */
#define HV_L0_INDEX(va) \
  (((va) >> HV_LOG2_L1_SPAN) & (HV_L0_ENTRIES - 1))

#else

/** Index in L0 for a specific VA (C version; casts through HV_VirtAddr) */
#define HV_L0_INDEX(va) \
  (((HV_VirtAddr)(va) >> HV_LOG2_L1_SPAN) & (HV_L0_ENTRIES - 1))

#endif

#endif /* CHIP_VA_WIDTH() > 32 */
2308 | |||
/** Log number of HV_PTE entries in L1 page table */
#define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE)

/** Number of HV_PTE entries in L1 page table */
#define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES)

/** Log size of L1 page table in bytes */
#define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES)

/** Size of L1 page table in bytes */
#define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE)

/** Log number of HV_PTE entries in level-2 page table */
#define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)

/** Number of HV_PTE entries in level-2 page table */
#define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES)

/** Log size of level-2 page table in bytes */
#define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES)

/** Size of level-2 page table in bytes */
#define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE)

#ifdef __ASSEMBLER__

#if CHIP_VA_WIDTH() > 32

/** Index in L1 for a specific VA */
#define HV_L1_INDEX(va) \
  (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))

#else /* CHIP_VA_WIDTH() > 32 */

/** Index in L1 for a specific VA (no masking: with a 32-bit VA there is
 * no L0 table, so the L1 table is the root). */
#define HV_L1_INDEX(va) \
  (((va) >> HV_LOG2_PAGE_SIZE_LARGE))

#endif /* CHIP_VA_WIDTH() > 32 */

/** Index in level-2 page table for a specific VA */
#define HV_L2_INDEX(va) \
  (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))

#else /* __ASSEMBLER__ */

#if CHIP_VA_WIDTH() > 32

/** Index in L1 for a specific VA */
#define HV_L1_INDEX(va) \
  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1))

#else /* CHIP_VA_WIDTH() > 32 */

/** Index in L1 for a specific VA (no masking: with a 32-bit VA there is
 * no L0 table, so the L1 table is the root). */
#define HV_L1_INDEX(va) \
  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE))

#endif /* CHIP_VA_WIDTH() > 32 */

/** Index in level-2 page table for a specific VA */
#define HV_L2_INDEX(va) \
  (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1))

#endif /* __ASSEMBLER__ */
2374 | |||
2375 | #endif /* _TILE_HV_H */ | ||
diff --git a/arch/tile/include/hv/pagesize.h b/arch/tile/include/hv/pagesize.h new file mode 100644 index 000000000000..58bed114fedd --- /dev/null +++ b/arch/tile/include/hv/pagesize.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file pagesize.h | ||
17 | */ | ||
18 | |||
#ifndef _HV_PAGESIZE_H
#define _HV_PAGESIZE_H

/** The log2 of the size of small pages, in bytes (i.e. 64 KB pages).
 * This value should be verified at runtime by calling
 * hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
 */
#define HV_LOG2_PAGE_SIZE_SMALL 16

/** The log2 of the size of large pages, in bytes (i.e. 16 MB pages).
 * This value should be verified at runtime by calling
 * hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
 */
#define HV_LOG2_PAGE_SIZE_LARGE 24

#endif /* _HV_PAGESIZE_H */
diff --git a/arch/tile/include/hv/syscall_public.h b/arch/tile/include/hv/syscall_public.h new file mode 100644 index 000000000000..9cc0837e69fd --- /dev/null +++ b/arch/tile/include/hv/syscall_public.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file syscall.h | ||
17 | * Indices for the hypervisor system calls that are intended to be called | ||
18 | * directly, rather than only through hypervisor-generated "glue" code. | ||
19 | */ | ||
20 | |||
#ifndef _SYS_HV_INCLUDE_SYSCALL_PUBLIC_H
#define _SYS_HV_INCLUDE_SYSCALL_PUBLIC_H

/** Fast syscall flag bit location.  When this bit is set, the hypervisor
 * handles the syscall specially.
 */
#define HV_SYS_FAST_SHIFT 14

/** Fast syscall flag bit mask. */
#define HV_SYS_FAST_MASK (1 << HV_SYS_FAST_SHIFT)

/** Bit location for flagging fast syscalls that can be called from PL0.
 *
 * Note: the name historically uses the letter 'O' ("PLO") rather than
 * the digit zero; it is retained for backward compatibility.
 */
#define HV_SYS_FAST_PLO_SHIFT 13

/** Correctly-spelled alias for HV_SYS_FAST_PLO_SHIFT ("PL0" with a zero,
 * matching HV_SYS_FAST_PL0_MASK below).  New code should use this name.
 */
#define HV_SYS_FAST_PL0_SHIFT HV_SYS_FAST_PLO_SHIFT

/** Fast syscall allowing PL0 bit mask. */
#define HV_SYS_FAST_PL0_MASK (1 << HV_SYS_FAST_PLO_SHIFT)

/** Perform an MF that waits for all victims to reach DRAM. */
#define HV_SYS_fence_incoherent (51 | HV_SYS_FAST_MASK \
                                 | HV_SYS_FAST_PL0_MASK)

#endif /* !_SYS_HV_INCLUDE_SYSCALL_PUBLIC_H */
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile new file mode 100644 index 000000000000..112b1e248f05 --- /dev/null +++ b/arch/tile/kernel/Makefile | |||
@@ -0,0 +1,17 @@ | |||
#
# Makefile for the Linux/TILE kernel.
#

# Built as part of vmlinux but not via obj-y: the linker script and the
# word-size-specific boot entry code.
extra-y := vmlinux.lds head_$(BITS).o

# Core objects always built for this architecture.
obj-y := backtrace.o entry.o init_task.o irq.o messaging.o \
	pci-dma.o proc.o process.o ptrace.o reboot.o \
	setup.o signal.o single_step.o stack.o sys.o time.o traps.o \
	intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o

# Optional objects, selected by kernel configuration.
obj-$(CONFIG_HARDWALL)		+= hardwall.o
obj-$(CONFIG_TILEGX)		+= futex_64.o
obj-$(CONFIG_COMPAT)		+= compat.o compat_signal.o
obj-$(CONFIG_SMP)		+= smpboot.o smp.o tlb.o
obj-$(CONFIG_MODULES)		+= module.o
obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
diff --git a/arch/tile/kernel/asm-offsets.c b/arch/tile/kernel/asm-offsets.c new file mode 100644 index 000000000000..01ddf19cc36d --- /dev/null +++ b/arch/tile/kernel/asm-offsets.c | |||
@@ -0,0 +1,76 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Generates definitions from c-type structures used by assembly sources. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kbuild.h> | ||
18 | #include <linux/thread_info.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/hardirq.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | |||
24 | /* Check for compatible compiler early in the build. */ | ||
25 | #ifdef CONFIG_TILEGX | ||
26 | # ifndef __tilegx__ | ||
27 | # error Can only build TILE-Gx configurations with tilegx compiler | ||
28 | # endif | ||
29 | # ifndef __LP64__ | ||
30 | # error Must not specify -m32 when building the TILE-Gx kernel | ||
31 | # endif | ||
32 | #else | ||
33 | # ifdef __tilegx__ | ||
34 | # error Can not build TILEPro/TILE64 configurations with tilegx compiler | ||
35 | # endif | ||
36 | #endif | ||
37 | |||
38 | void foo(void) | ||
39 | { | ||
40 | DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET, \ | ||
41 | offsetof(struct single_step_state, buffer)); | ||
42 | DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET, \ | ||
43 | offsetof(struct single_step_state, flags)); | ||
44 | DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET, \ | ||
45 | offsetof(struct single_step_state, orig_pc)); | ||
46 | DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET, \ | ||
47 | offsetof(struct single_step_state, next_pc)); | ||
48 | DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET, \ | ||
49 | offsetof(struct single_step_state, branch_next_pc)); | ||
50 | DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET, \ | ||
51 | offsetof(struct single_step_state, update_value)); | ||
52 | |||
53 | DEFINE(THREAD_INFO_TASK_OFFSET, \ | ||
54 | offsetof(struct thread_info, task)); | ||
55 | DEFINE(THREAD_INFO_FLAGS_OFFSET, \ | ||
56 | offsetof(struct thread_info, flags)); | ||
57 | DEFINE(THREAD_INFO_STATUS_OFFSET, \ | ||
58 | offsetof(struct thread_info, status)); | ||
59 | DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET, \ | ||
60 | offsetof(struct thread_info, homecache_cpu)); | ||
61 | DEFINE(THREAD_INFO_STEP_STATE_OFFSET, \ | ||
62 | offsetof(struct thread_info, step_state)); | ||
63 | |||
64 | DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET, | ||
65 | offsetof(struct task_struct, thread.ksp)); | ||
66 | DEFINE(TASK_STRUCT_THREAD_PC_OFFSET, | ||
67 | offsetof(struct task_struct, thread.pc)); | ||
68 | |||
69 | DEFINE(HV_TOPOLOGY_WIDTH_OFFSET, \ | ||
70 | offsetof(HV_Topology, width)); | ||
71 | DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET, \ | ||
72 | offsetof(HV_Topology, height)); | ||
73 | |||
74 | DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET, \ | ||
75 | offsetof(irq_cpustat_t, irq_syscall_count)); | ||
76 | } | ||
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c new file mode 100644 index 000000000000..77265f3b58d6 --- /dev/null +++ b/arch/tile/kernel/backtrace.c | |||
@@ -0,0 +1,621 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/string.h> | ||
17 | |||
18 | #include <asm/backtrace.h> | ||
19 | |||
20 | #include <arch/chip.h> | ||
21 | |||
22 | #if TILE_CHIP < 10 | ||
23 | |||
24 | |||
25 | #include <asm/opcode-tile.h> | ||
26 | |||
27 | |||
28 | #define TREG_SP 54 | ||
29 | #define TREG_LR 55 | ||
30 | |||
31 | |||
32 | /** A decoded bundle used for backtracer analysis. */ | ||
33 | struct BacktraceBundle { | ||
34 | tile_bundle_bits bits; | ||
35 | int num_insns; | ||
36 | struct tile_decoded_instruction | ||
37 | insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]; | ||
38 | }; | ||
39 | |||
40 | |||
41 | /* This implementation only makes sense for native tools. */ | ||
42 | /** Default function to read memory. */ | ||
43 | static bool bt_read_memory(void *result, VirtualAddress addr, | ||
44 | size_t size, void *extra) | ||
45 | { | ||
46 | /* FIXME: this should do some horrible signal stuff to catch | ||
47 | * SEGV cleanly and fail. | ||
48 | * | ||
49 | * Or else the caller should do the setjmp for efficiency. | ||
50 | */ | ||
51 | |||
52 | memcpy(result, (const void *)addr, size); | ||
53 | return true; | ||
54 | } | ||
55 | |||
56 | |||
57 | /** Locates an instruction inside the given bundle that | ||
58 | * has the specified mnemonic, and whose first 'num_operands_to_match' | ||
59 | * operands exactly match those in 'operand_values'. | ||
60 | */ | ||
61 | static const struct tile_decoded_instruction *find_matching_insn( | ||
62 | const struct BacktraceBundle *bundle, | ||
63 | tile_mnemonic mnemonic, | ||
64 | const int *operand_values, | ||
65 | int num_operands_to_match) | ||
66 | { | ||
67 | int i, j; | ||
68 | bool match; | ||
69 | |||
70 | for (i = 0; i < bundle->num_insns; i++) { | ||
71 | const struct tile_decoded_instruction *insn = | ||
72 | &bundle->insns[i]; | ||
73 | |||
74 | if (insn->opcode->mnemonic != mnemonic) | ||
75 | continue; | ||
76 | |||
77 | match = true; | ||
78 | for (j = 0; j < num_operands_to_match; j++) { | ||
79 | if (operand_values[j] != insn->operand_values[j]) { | ||
80 | match = false; | ||
81 | break; | ||
82 | } | ||
83 | } | ||
84 | |||
85 | if (match) | ||
86 | return insn; | ||
87 | } | ||
88 | |||
89 | return NULL; | ||
90 | } | ||
91 | |||
92 | /** Does this bundle contain an 'iret' instruction? */ | ||
93 | static inline bool bt_has_iret(const struct BacktraceBundle *bundle) | ||
94 | { | ||
95 | return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL; | ||
96 | } | ||
97 | |||
98 | /** Does this bundle contain an 'addi sp, sp, OFFSET' or | ||
99 | * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET? | ||
100 | */ | ||
101 | static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust) | ||
102 | { | ||
103 | static const int vals[2] = { TREG_SP, TREG_SP }; | ||
104 | |||
105 | const struct tile_decoded_instruction *insn = | ||
106 | find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2); | ||
107 | if (insn == NULL) | ||
108 | insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2); | ||
109 | if (insn == NULL) | ||
110 | return false; | ||
111 | |||
112 | *adjust = insn->operand_values[2]; | ||
113 | return true; | ||
114 | } | ||
115 | |||
116 | /** Does this bundle contain any 'info OP' or 'infol OP' | ||
117 | * instruction, and if so, what are their OP? Note that OP is interpreted | ||
118 | * as an unsigned value by this code since that's what the caller wants. | ||
119 | * Returns the number of info ops found. | ||
120 | */ | ||
121 | static int bt_get_info_ops(const struct BacktraceBundle *bundle, | ||
122 | int operands[MAX_INFO_OPS_PER_BUNDLE]) | ||
123 | { | ||
124 | int num_ops = 0; | ||
125 | int i; | ||
126 | |||
127 | for (i = 0; i < bundle->num_insns; i++) { | ||
128 | const struct tile_decoded_instruction *insn = | ||
129 | &bundle->insns[i]; | ||
130 | |||
131 | if (insn->opcode->mnemonic == TILE_OPC_INFO || | ||
132 | insn->opcode->mnemonic == TILE_OPC_INFOL) { | ||
133 | operands[num_ops++] = insn->operand_values[0]; | ||
134 | } | ||
135 | } | ||
136 | |||
137 | return num_ops; | ||
138 | } | ||
139 | |||
140 | /** Does this bundle contain a jrp instruction, and if so, to which | ||
141 | * register is it jumping? | ||
142 | */ | ||
143 | static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg) | ||
144 | { | ||
145 | const struct tile_decoded_instruction *insn = | ||
146 | find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0); | ||
147 | if (insn == NULL) | ||
148 | return false; | ||
149 | |||
150 | *target_reg = insn->operand_values[0]; | ||
151 | return true; | ||
152 | } | ||
153 | |||
154 | /** Does this bundle modify the specified register in any way? */ | ||
155 | static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg) | ||
156 | { | ||
157 | int i, j; | ||
158 | for (i = 0; i < bundle->num_insns; i++) { | ||
159 | const struct tile_decoded_instruction *insn = | ||
160 | &bundle->insns[i]; | ||
161 | |||
162 | if (insn->opcode->implicitly_written_register == reg) | ||
163 | return true; | ||
164 | |||
165 | for (j = 0; j < insn->opcode->num_operands; j++) | ||
166 | if (insn->operands[j]->is_dest_reg && | ||
167 | insn->operand_values[j] == reg) | ||
168 | return true; | ||
169 | } | ||
170 | |||
171 | return false; | ||
172 | } | ||
173 | |||
174 | /** Does this bundle modify sp? */ | ||
175 | static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle) | ||
176 | { | ||
177 | return bt_modifies_reg(bundle, TREG_SP); | ||
178 | } | ||
179 | |||
180 | /** Does this bundle modify lr? */ | ||
181 | static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle) | ||
182 | { | ||
183 | return bt_modifies_reg(bundle, TREG_LR); | ||
184 | } | ||
185 | |||
186 | /** Does this bundle contain the instruction 'move fp, sp'? */ | ||
187 | static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle) | ||
188 | { | ||
189 | static const int vals[2] = { 52, TREG_SP }; | ||
190 | return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL; | ||
191 | } | ||
192 | |||
193 | /** Does this bundle contain the instruction 'sw sp, lr'? */ | ||
194 | static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle) | ||
195 | { | ||
196 | static const int vals[2] = { TREG_SP, TREG_LR }; | ||
197 | return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL; | ||
198 | } | ||
199 | |||
/** Locates the caller's PC and SP for a program starting at the
 * given address.
 *
 * Scans forward from start_pc one bundle at a time, looking either for
 * compiler-emitted 'info' annotations that describe the caller state
 * directly, or — failing that — for prolog/epilog instruction patterns
 * (sp adjustment, 'move r52, sp', 'sw sp, lr', jrp/iret) from which the
 * caller's SP and PC locations can be deduced.  Results are written
 * into *location; on entry it is initialized to the defaults
 * "caller sp == current sp" and "caller pc unknown".
 */
static void find_caller_pc_and_caller_sp(CallerLocation *location,
					 const VirtualAddress start_pc,
					 BacktraceMemoryReader read_memory_func,
					 void *read_memory_func_extra)
{
	/* Have we explicitly decided what the sp is,
	 * rather than just the default?
	 */
	bool sp_determined = false;

	/* Has any bundle seen so far modified lr? */
	bool lr_modified = false;

	/* Have we seen a move from sp to fp? */
	bool sp_moved_to_r52 = false;

	/* Have we seen a terminating bundle? */
	bool seen_terminating_bundle = false;

	/* Cut down on round-trip reading overhead by reading several
	 * bundles at a time.
	 */
	tile_bundle_bits prefetched_bundles[32];
	int num_bundles_prefetched = 0;
	int next_bundle = 0;
	VirtualAddress pc;

	/* Default to assuming that the caller's sp is the current sp.
	 * This is necessary to handle the case where we start backtracing
	 * right at the end of the epilog.
	 */
	location->sp_location = SP_LOC_OFFSET;
	location->sp_offset = 0;

	/* Default to having no idea where the caller PC is. */
	location->pc_location = PC_LOC_UNKNOWN;

	/* Don't even try if the PC is not aligned. */
	if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0)
		return;

	for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {

		struct BacktraceBundle bundle;
		int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
		int one_ago, jrp_reg;
		bool has_jrp;

		if (next_bundle >= num_bundles_prefetched) {
			/* Prefetch some bytes, but don't cross a page
			 * boundary since that might cause a read failure we
			 * don't care about if we only need the first few
			 * bytes. Note: we don't care what the actual page
			 * size is; using the minimum possible page size will
			 * prevent any problems.
			 */
			unsigned int bytes_to_prefetch = 4096 - (pc & 4095);
			if (bytes_to_prefetch > sizeof prefetched_bundles)
				bytes_to_prefetch = sizeof prefetched_bundles;

			if (!read_memory_func(prefetched_bundles, pc,
					      bytes_to_prefetch,
					      read_memory_func_extra)) {
				if (pc == start_pc) {
					/* The program probably called a bad
					 * address, such as a NULL pointer.
					 * So treat this as if we are at the
					 * start of the function prolog so the
					 * backtrace will show how we got here.
					 */
					location->pc_location = PC_LOC_IN_LR;
					return;
				}

				/* Unreadable address. Give up. */
				break;
			}

			next_bundle = 0;
			num_bundles_prefetched =
				bytes_to_prefetch / sizeof(tile_bundle_bits);
		}

		/* Decode the next bundle. */
		bundle.bits = prefetched_bundles[next_bundle++];
		bundle.num_insns =
			parse_insn_tile(bundle.bits, pc, bundle.insns);
		num_info_ops = bt_get_info_ops(&bundle, info_operands);

		/* First look at any one_ago info ops if they are interesting,
		 * since they should shadow any non-one-ago info ops.
		 * (one_ago == 1 only applies after the first bundle, since an
		 * annotation for "one bundle ago" at start_pc would describe
		 * a bundle we never examined.)
		 */
		for (one_ago = (pc != start_pc) ? 1 : 0;
		     one_ago >= 0; one_ago--) {
			int i;
			for (i = 0; i < num_info_ops; i++) {
				int info_operand = info_operands[i];
				if (info_operand < CALLER_UNKNOWN_BASE)	{
					/* Weird; reserved value, ignore it. */
					continue;
				}

				/* Skip info ops which are not in the
				 * "one_ago" mode we want right now.
				 */
				if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0)
				    != (one_ago != 0))
					continue;

				/* Clear the flag to make later checking
				 * easier. */
				info_operand &= ~ONE_BUNDLE_AGO_FLAG;

				/* Default to looking at PC_IN_LR_FLAG. */
				if (info_operand & PC_IN_LR_FLAG)
					location->pc_location =
						PC_LOC_IN_LR;
				else
					location->pc_location =
						PC_LOC_ON_STACK;

				switch (info_operand) {
				case CALLER_UNKNOWN_BASE:
					/* Compiler declared the caller state
					 * unrecoverable; we are done. */
					location->pc_location = PC_LOC_UNKNOWN;
					location->sp_location = SP_LOC_UNKNOWN;
					return;

				case CALLER_SP_IN_R52_BASE:
				case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG:
					location->sp_location = SP_LOC_IN_R52;
					return;

				default:
				{
					/* Anything else encodes an SP offset
					 * in the bits above the flag bits,
					 * scaled by 8 bytes. */
					const unsigned int val = info_operand
						- CALLER_SP_OFFSET_BASE;
					const unsigned int sp_offset =
						(val >> NUM_INFO_OP_FLAGS) * 8;
					if (sp_offset < 32768) {
						/* This is a properly encoded
						 * SP offset. */
						location->sp_location =
							SP_LOC_OFFSET;
						location->sp_offset =
							sp_offset;
						return;
					} else {
						/* This looked like an SP
						 * offset, but it's outside
						 * the legal range, so this
						 * must be an unrecognized
						 * info operand.  Ignore it.
						 */
					}
				}
					break;
				}
			}
		}

		if (seen_terminating_bundle) {
			/* We saw a terminating bundle during the previous
			 * iteration, so we were only looking for an info op.
			 */
			break;
		}

		if (bundle.bits == 0) {
			/* Wacky terminating bundle. Stop looping, and hope
			 * we've already seen enough to find the caller.
			 */
			break;
		}

		/*
		 * Try to determine caller's SP.
		 */

		if (!sp_determined) {
			int adjust;
			if (bt_has_addi_sp(&bundle, &adjust)) {
				location->sp_location = SP_LOC_OFFSET;

				if (adjust <= 0) {
					/* We are in prolog about to adjust
					 * SP. */
					location->sp_offset = 0;
				} else {
					/* We are in epilog restoring SP. */
					location->sp_offset = adjust;
				}

				sp_determined = true;
			} else {
				if (bt_has_move_r52_sp(&bundle)) {
					/* Maybe in prolog, creating an
					 * alloca-style frame.  But maybe in
					 * the middle of a fixed-size frame
					 * clobbering r52 with SP.
					 */
					sp_moved_to_r52 = true;
				}

				if (bt_modifies_sp(&bundle)) {
					if (sp_moved_to_r52) {
						/* We saw SP get saved into
						 * r52 earlier (or now), which
						 * must have been in the
						 * prolog, so we now know that
						 * SP is still holding the
						 * caller's sp value.
						 */
						location->sp_location =
							SP_LOC_OFFSET;
						location->sp_offset = 0;
					} else {
						/* Someone must have saved
						 * aside the caller's SP value
						 * into r52, so r52 holds the
						 * current value.
						 */
						location->sp_location =
							SP_LOC_IN_R52;
					}
					sp_determined = true;
				}
			}
		}

		if (bt_has_iret(&bundle)) {
			/* This is a terminating bundle. */
			seen_terminating_bundle = true;
			continue;
		}

		/*
		 * Try to determine caller's PC.
		 */

		jrp_reg = -1;
		has_jrp = bt_has_jrp(&bundle, &jrp_reg);
		if (has_jrp)
			seen_terminating_bundle = true;

		if (location->pc_location == PC_LOC_UNKNOWN) {
			if (has_jrp) {
				if (jrp_reg == TREG_LR && !lr_modified) {
					/* Looks like a leaf function, or else
					 * lr is already restored. */
					location->pc_location =
						PC_LOC_IN_LR;
				} else {
					location->pc_location =
						PC_LOC_ON_STACK;
				}
			} else if (bt_has_sw_sp_lr(&bundle)) {
				/* In prolog, spilling initial lr to stack. */
				location->pc_location = PC_LOC_IN_LR;
			} else if (bt_modifies_lr(&bundle)) {
				lr_modified = true;
			}
		}
	}
}
467 | |||
468 | void backtrace_init(BacktraceIterator *state, | ||
469 | BacktraceMemoryReader read_memory_func, | ||
470 | void *read_memory_func_extra, | ||
471 | VirtualAddress pc, VirtualAddress lr, | ||
472 | VirtualAddress sp, VirtualAddress r52) | ||
473 | { | ||
474 | CallerLocation location; | ||
475 | VirtualAddress fp, initial_frame_caller_pc; | ||
476 | |||
477 | if (read_memory_func == NULL) { | ||
478 | read_memory_func = bt_read_memory; | ||
479 | } | ||
480 | |||
481 | /* Find out where we are in the initial frame. */ | ||
482 | find_caller_pc_and_caller_sp(&location, pc, | ||
483 | read_memory_func, read_memory_func_extra); | ||
484 | |||
485 | switch (location.sp_location) { | ||
486 | case SP_LOC_UNKNOWN: | ||
487 | /* Give up. */ | ||
488 | fp = -1; | ||
489 | break; | ||
490 | |||
491 | case SP_LOC_IN_R52: | ||
492 | fp = r52; | ||
493 | break; | ||
494 | |||
495 | case SP_LOC_OFFSET: | ||
496 | fp = sp + location.sp_offset; | ||
497 | break; | ||
498 | |||
499 | default: | ||
500 | /* Give up. */ | ||
501 | fp = -1; | ||
502 | break; | ||
503 | } | ||
504 | |||
505 | /* The frame pointer should theoretically be aligned mod 8. If | ||
506 | * it's not even aligned mod 4 then something terrible happened | ||
507 | * and we should mark it as invalid. | ||
508 | */ | ||
509 | if (fp % 4 != 0) | ||
510 | fp = -1; | ||
511 | |||
512 | /* -1 means "don't know initial_frame_caller_pc". */ | ||
513 | initial_frame_caller_pc = -1; | ||
514 | |||
515 | switch (location.pc_location) { | ||
516 | case PC_LOC_UNKNOWN: | ||
517 | /* Give up. */ | ||
518 | fp = -1; | ||
519 | break; | ||
520 | |||
521 | case PC_LOC_IN_LR: | ||
522 | if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) { | ||
523 | /* Give up. */ | ||
524 | fp = -1; | ||
525 | } else { | ||
526 | initial_frame_caller_pc = lr; | ||
527 | } | ||
528 | break; | ||
529 | |||
530 | case PC_LOC_ON_STACK: | ||
531 | /* Leave initial_frame_caller_pc as -1, | ||
532 | * meaning check the stack. | ||
533 | */ | ||
534 | break; | ||
535 | |||
536 | default: | ||
537 | /* Give up. */ | ||
538 | fp = -1; | ||
539 | break; | ||
540 | } | ||
541 | |||
542 | state->pc = pc; | ||
543 | state->sp = sp; | ||
544 | state->fp = fp; | ||
545 | state->initial_frame_caller_pc = initial_frame_caller_pc; | ||
546 | state->read_memory_func = read_memory_func; | ||
547 | state->read_memory_func_extra = read_memory_func_extra; | ||
548 | } | ||
549 | |||
550 | bool backtrace_next(BacktraceIterator *state) | ||
551 | { | ||
552 | VirtualAddress next_fp, next_pc, next_frame[2]; | ||
553 | |||
554 | if (state->fp == -1) { | ||
555 | /* No parent frame. */ | ||
556 | return false; | ||
557 | } | ||
558 | |||
559 | /* Try to read the frame linkage data chaining to the next function. */ | ||
560 | if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame, | ||
561 | state->read_memory_func_extra)) { | ||
562 | return false; | ||
563 | } | ||
564 | |||
565 | next_fp = next_frame[1]; | ||
566 | if (next_fp % 4 != 0) { | ||
567 | /* Caller's frame pointer is suspect, so give up. | ||
568 | * Technically it should be aligned mod 8, but we will | ||
569 | * be forgiving here. | ||
570 | */ | ||
571 | return false; | ||
572 | } | ||
573 | |||
574 | if (state->initial_frame_caller_pc != -1) { | ||
575 | /* We must be in the initial stack frame and already know the | ||
576 | * caller PC. | ||
577 | */ | ||
578 | next_pc = state->initial_frame_caller_pc; | ||
579 | |||
580 | /* Force reading stack next time, in case we were in the | ||
581 | * initial frame. We don't do this above just to paranoidly | ||
582 | * avoid changing the struct at all when we return false. | ||
583 | */ | ||
584 | state->initial_frame_caller_pc = -1; | ||
585 | } else { | ||
586 | /* Get the caller PC from the frame linkage area. */ | ||
587 | next_pc = next_frame[0]; | ||
588 | if (next_pc == 0 || | ||
589 | next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) { | ||
590 | /* The PC is suspect, so give up. */ | ||
591 | return false; | ||
592 | } | ||
593 | } | ||
594 | |||
595 | /* Update state to become the caller's stack frame. */ | ||
596 | state->pc = next_pc; | ||
597 | state->sp = state->fp; | ||
598 | state->fp = next_fp; | ||
599 | |||
600 | return true; | ||
601 | } | ||
602 | |||
603 | #else /* TILE_CHIP < 10 */ | ||
604 | |||
/* Stub for chips with TILE_CHIP >= 10: record the starting registers but
 * mark the frame chain unknown (fp == -1), so no unwinding is attempted.
 */
void backtrace_init(BacktraceIterator *state,
		    BacktraceMemoryReader read_memory_func,
		    void *read_memory_func_extra,
		    VirtualAddress pc, VirtualAddress lr,
		    VirtualAddress sp, VirtualAddress r52)
{
	state->pc = pc;
	state->sp = sp;
	state->fp = -1;			/* -1 == no known parent frame */
	state->initial_frame_caller_pc = -1;
	state->read_memory_func = read_memory_func;
	state->read_memory_func_extra = read_memory_func_extra;
}
618 | |||
/* Stub for chips with TILE_CHIP >= 10: backtracing not implemented. */
bool backtrace_next(BacktraceIterator *state) { return false; }
620 | |||
621 | #endif /* TILE_CHIP < 10 */ | ||
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c new file mode 100644 index 000000000000..b1e06d041555 --- /dev/null +++ b/arch/tile/kernel/compat.c | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Adjust unistd.h to provide 32-bit numbers and functions. */ | ||
16 | #define __SYSCALL_COMPAT | ||
17 | |||
18 | #include <linux/compat.h> | ||
19 | #include <linux/msg.h> | ||
20 | #include <linux/syscalls.h> | ||
21 | #include <linux/kdev_t.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/fcntl.h> | ||
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <linux/signal.h> | ||
27 | #include <asm/syscalls.h> | ||
28 | |||
29 | /* | ||
30 | * Syscalls that take 64-bit numbers traditionally take them in 32-bit | ||
31 | * "high" and "low" value parts on 32-bit architectures. | ||
32 | * In principle, one could imagine passing some register arguments as | ||
33 | * fully 64-bit on TILE-Gx in 32-bit mode, but it seems easier to | ||
34 | * adapt the usual convention. | ||
35 | */ | ||
36 | |||
37 | long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high) | ||
38 | { | ||
39 | return sys_truncate(filename, ((loff_t)high << 32) | low); | ||
40 | } | ||
41 | |||
42 | long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high) | ||
43 | { | ||
44 | return sys_ftruncate(fd, ((loff_t)high << 32) | low); | ||
45 | } | ||
46 | |||
47 | long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count, | ||
48 | u32 dummy, u32 low, u32 high) | ||
49 | { | ||
50 | return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low); | ||
51 | } | ||
52 | |||
53 | long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count, | ||
54 | u32 dummy, u32 low, u32 high) | ||
55 | { | ||
56 | return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low); | ||
57 | } | ||
58 | |||
59 | long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len) | ||
60 | { | ||
61 | return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len); | ||
62 | } | ||
63 | |||
64 | long compat_sys_sync_file_range2(int fd, unsigned int flags, | ||
65 | u32 offset_lo, u32 offset_hi, | ||
66 | u32 nbytes_lo, u32 nbytes_hi) | ||
67 | { | ||
68 | return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo, | ||
69 | ((loff_t)nbytes_hi << 32) | nbytes_lo, | ||
70 | flags); | ||
71 | } | ||
72 | |||
73 | long compat_sys_fallocate(int fd, int mode, | ||
74 | u32 offset_lo, u32 offset_hi, | ||
75 | u32 len_lo, u32 len_hi) | ||
76 | { | ||
77 | return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo, | ||
78 | ((loff_t)len_hi << 32) | len_lo); | ||
79 | } | ||
80 | |||
81 | |||
82 | |||
/*
 * Run the native syscall against a kernel-stack timespec (switching to
 * KERNEL_DS so the __user-annotated pointer is accepted), then convert
 * the result into the 32-bit compat_timespec layout for userspace.
 * Returns the native syscall's result, or -EFAULT if copying the
 * converted timespec back to userspace fails.
 */
long compat_sys_sched_rr_get_interval(compat_pid_t pid,
				      struct compat_timespec __user *interval)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid,
					(struct timespec __force __user *)&t);
	set_fs(old_fs);	/* restore the original address limit */
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}
98 | |||
/*
 * The usual compat_sys_msgsnd() and _msgrcv() seem to be assuming
 * some different calling convention than our normal 32-bit tile code.
 */

/* Already defined in ipc/compat.c, but we need it here. */
struct compat_msgbuf {
	compat_long_t mtype;	/* 32-bit message type word */
	char mtext[1];		/* message payload follows in userspace */
};
109 | |||
110 | long tile_compat_sys_msgsnd(int msqid, | ||
111 | struct compat_msgbuf __user *msgp, | ||
112 | size_t msgsz, int msgflg) | ||
113 | { | ||
114 | compat_long_t mtype; | ||
115 | |||
116 | if (get_user(mtype, &msgp->mtype)) | ||
117 | return -EFAULT; | ||
118 | return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg); | ||
119 | } | ||
120 | |||
121 | long tile_compat_sys_msgrcv(int msqid, | ||
122 | struct compat_msgbuf __user *msgp, | ||
123 | size_t msgsz, long msgtyp, int msgflg) | ||
124 | { | ||
125 | long err, mtype; | ||
126 | |||
127 | err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg); | ||
128 | if (err < 0) | ||
129 | goto out; | ||
130 | |||
131 | if (put_user(mtype, &msgp->mtype)) | ||
132 | err = -EFAULT; | ||
133 | out: | ||
134 | return err; | ||
135 | } | ||
136 | |||
/* Provide the compat syscall number to call mapping.  Each __SYSCALL()
 * expansion below becomes a designated initializer in the table. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (compat_##call),

/* The generic versions of these don't work for Tile. */
#define compat_sys_msgrcv tile_compat_sys_msgrcv
#define compat_sys_msgsnd tile_compat_sys_msgsnd

/* See comments in sys.c */
#define compat_sys_fadvise64 sys32_fadvise64
#define compat_sys_fadvise64_64 sys32_fadvise64_64
#define compat_sys_readahead sys32_readahead
#define compat_sys_sync_file_range compat_sys_sync_file_range2

/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
#define compat_sys_stat64 sys_newstat
#define compat_sys_lstat64 sys_newlstat
#define compat_sys_fstat64 sys_newfstat
#define compat_sys_fstatat64 sys_newfstatat

/* Pass full 64-bit values through ptrace. */
#define compat_sys_ptrace tile_compat_sys_ptrace

/*
 * Note that we can't include <linux/unistd.h> here since the header
 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
 */
/* Every slot defaults to sys_ni_syscall; including <asm/unistd.h> then
 * expands one __SYSCALL() initializer per implemented syscall,
 * overriding the default for that slot. */
void *compat_sys_call_table[__NR_syscalls] = {
	[0 ... __NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c new file mode 100644 index 000000000000..d5efb215dd5f --- /dev/null +++ b/arch/tile/kernel/compat_signal.c | |||
@@ -0,0 +1,435 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/smp_lock.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/signal.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/wait.h> | ||
23 | #include <linux/unistd.h> | ||
24 | #include <linux/stddef.h> | ||
25 | #include <linux/personality.h> | ||
26 | #include <linux/suspend.h> | ||
27 | #include <linux/ptrace.h> | ||
28 | #include <linux/elf.h> | ||
29 | #include <linux/compat.h> | ||
30 | #include <linux/syscalls.h> | ||
31 | #include <linux/uaccess.h> | ||
32 | #include <asm/processor.h> | ||
33 | #include <asm/ucontext.h> | ||
34 | #include <asm/sigframe.h> | ||
35 | #include <asm/syscalls.h> | ||
36 | #include <arch/interrupts.h> | ||
37 | |||
/*
 * 32-bit userspace view of "struct sigaction": handler and restorer
 * are compat (32-bit) user pointers rather than native pointers.
 */
struct compat_sigaction {
	compat_uptr_t sa_handler;
	compat_ulong_t sa_flags;
	compat_uptr_t sa_restorer;
	sigset_t sa_mask __packed;
};

/* 32-bit userspace view of "stack_t" for sigaltstack(). */
struct compat_sigaltstack {
	compat_uptr_t ss_sp;
	int ss_flags;
	compat_size_t ss_size;
};

/* 32-bit userspace view of "struct ucontext"; part of the signal frame. */
struct compat_ucontext {
	compat_ulong_t uc_flags;
	compat_uptr_t uc_link;
	struct compat_sigaltstack uc_stack;
	struct sigcontext uc_mcontext;
	sigset_t uc_sigmask;	/* mask last for extensibility */
};

/* 32-bit userspace view of "siginfo_t"; see copy_siginfo_*_user32(). */
struct compat_siginfo {
	int si_signo;
	int si_errno;
	int si_code;

	union {
		int _pad[SI_PAD_SIZE];

		/* kill() */
		struct {
			unsigned int _pid;	/* sender's pid */
			unsigned int _uid;	/* sender's uid */
		} _kill;

		/* POSIX.1b timers */
		struct {
			compat_timer_t _tid;	/* timer id */
			int _overrun;		/* overrun count */
			compat_sigval_t _sigval;	/* same as below */
			int _sys_private;	/* not to be passed to user */
			int _overrun_incr;	/* amount to add to overrun */
		} _timer;

		/* POSIX.1b signals */
		struct {
			unsigned int _pid;	/* sender's pid */
			unsigned int _uid;	/* sender's uid */
			compat_sigval_t _sigval;
		} _rt;

		/* SIGCHLD */
		struct {
			unsigned int _pid;	/* which child */
			unsigned int _uid;	/* sender's uid */
			int _status;		/* exit code */
			compat_clock_t _utime;
			compat_clock_t _stime;
		} _sigchld;

		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
		struct {
			unsigned int _addr;	/* faulting insn/memory ref. */
#ifdef __ARCH_SI_TRAPNO
			int _trapno;	/* TRAP # which caused the signal */
#endif
		} _sigfault;

		/* SIGPOLL */
		struct {
			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
			int _fd;
		} _sigpoll;
	} _sifields;
};

/* The frame written onto the user stack when delivering a compat rt signal. */
struct compat_rt_sigframe {
	unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */
	struct compat_siginfo info;
	struct compat_ucontext uc;
};

/* Mask of all signals except the unblockable SIGKILL and SIGSTOP. */
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
121 | |||
122 | long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, | ||
123 | struct compat_sigaction __user *oact, | ||
124 | size_t sigsetsize) | ||
125 | { | ||
126 | struct k_sigaction new_sa, old_sa; | ||
127 | int ret = -EINVAL; | ||
128 | |||
129 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
130 | if (sigsetsize != sizeof(sigset_t)) | ||
131 | goto out; | ||
132 | |||
133 | if (act) { | ||
134 | compat_uptr_t handler, restorer; | ||
135 | |||
136 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
137 | __get_user(handler, &act->sa_handler) || | ||
138 | __get_user(new_sa.sa.sa_flags, &act->sa_flags) || | ||
139 | __get_user(restorer, &act->sa_restorer) || | ||
140 | __copy_from_user(&new_sa.sa.sa_mask, &act->sa_mask, | ||
141 | sizeof(sigset_t))) | ||
142 | return -EFAULT; | ||
143 | new_sa.sa.sa_handler = compat_ptr(handler); | ||
144 | new_sa.sa.sa_restorer = compat_ptr(restorer); | ||
145 | } | ||
146 | |||
147 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); | ||
148 | |||
149 | if (!ret && oact) { | ||
150 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
151 | __put_user(ptr_to_compat(old_sa.sa.sa_handler), | ||
152 | &oact->sa_handler) || | ||
153 | __put_user(ptr_to_compat(old_sa.sa.sa_restorer), | ||
154 | &oact->sa_restorer) || | ||
155 | __put_user(old_sa.sa.sa_flags, &oact->sa_flags) || | ||
156 | __copy_to_user(&oact->sa_mask, &old_sa.sa.sa_mask, | ||
157 | sizeof(sigset_t))) | ||
158 | return -EFAULT; | ||
159 | } | ||
160 | out: | ||
161 | return ret; | ||
162 | } | ||
163 | |||
164 | long compat_sys_rt_sigqueueinfo(int pid, int sig, | ||
165 | struct compat_siginfo __user *uinfo) | ||
166 | { | ||
167 | siginfo_t info; | ||
168 | int ret; | ||
169 | mm_segment_t old_fs = get_fs(); | ||
170 | |||
171 | if (copy_siginfo_from_user32(&info, uinfo)) | ||
172 | return -EFAULT; | ||
173 | set_fs(KERNEL_DS); | ||
174 | ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *)&info); | ||
175 | set_fs(old_fs); | ||
176 | return ret; | ||
177 | } | ||
178 | |||
/*
 * Copy a native siginfo_t out to user space in the 32-bit compat
 * layout.  Which union members are copied depends on si_code.
 * Returns 0 on success, nonzero/-EFAULT on any fault.
 */
int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(struct compat_siginfo)))
		return -EFAULT;

	/* If you change siginfo_t structure, please make sure that
	   this code is fixed accordingly.
	   It should never copy any pad contained in the structure
	   to avoid security leaks, but must copy the generic
	   3 ints plus the relevant union member.  */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);

	if (from->si_code < 0) {
		/* Negative si_code: queued via sigqueue() etc.; pid/uid/ptr. */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
	} else {
		/*
		 * First 32bits of unions are always present:
		 * si_pid === si_band === si_tid === si_addr(LS half)
		 */
		err |= __put_user(from->_sifields._pad[0],
				  &to->_sifields._pad[0]);
		/* Dispatch on the class bits of si_code for the rest. */
		switch (from->si_code >> 16) {
		case __SI_FAULT >> 16:
			/* _pad[0] (the fault address) already copied above. */
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			/* FALL THROUGH */
		default:
		case __SI_KILL >> 16:
			err |= __put_user(from->si_uid, &to->si_uid);
			break;
		case __SI_POLL >> 16:
			err |= __put_user(from->si_fd, &to->si_fd);
			break;
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(ptr_to_compat(from->si_ptr),
					  &to->si_ptr);
			break;
			 /* This is not generated by the kernel as of now.  */
		case __SI_RT >> 16:
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		}
	}
	return err;
}
236 | |||
237 | int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) | ||
238 | { | ||
239 | int err; | ||
240 | u32 ptr32; | ||
241 | |||
242 | if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) | ||
243 | return -EFAULT; | ||
244 | |||
245 | err = __get_user(to->si_signo, &from->si_signo); | ||
246 | err |= __get_user(to->si_errno, &from->si_errno); | ||
247 | err |= __get_user(to->si_code, &from->si_code); | ||
248 | |||
249 | err |= __get_user(to->si_pid, &from->si_pid); | ||
250 | err |= __get_user(to->si_uid, &from->si_uid); | ||
251 | err |= __get_user(ptr32, &from->si_ptr); | ||
252 | to->si_ptr = compat_ptr(ptr32); | ||
253 | |||
254 | return err; | ||
255 | } | ||
256 | |||
/*
 * Compat version of sigaltstack(): convert the 32-bit user structures
 * to/from a native stack_t and call do_sigaltstack() under KERNEL_DS,
 * since the temporary stack_t copies live on the kernel stack.
 */
long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr,
			     struct compat_sigaltstack __user *uoss_ptr,
			     struct pt_regs *regs)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		u32 ptr;

		memset(&uss, 0, sizeof(stack_t));
		if (!access_ok(VERIFY_READ, uss_ptr, sizeof(*uss_ptr)) ||
			   __get_user(ptr, &uss_ptr->ss_sp) ||
			   __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
			   __get_user(uss.ss_size, &uss_ptr->ss_size))
			return -EFAULT;
		uss.ss_sp = compat_ptr(ptr);
	}
	seg = get_fs();
	/* Allow do_sigaltstack() to dereference our kernel-space copies. */
	set_fs(KERNEL_DS);
	ret = do_sigaltstack(uss_ptr ? (stack_t __user __force *)&uss : NULL,
			     (stack_t __user __force *)&uoss,
			     (unsigned long)compat_ptr(regs->sp));
	set_fs(seg);
	/* On success, write the previous stack back out in compat layout. */
	if (ret >= 0 && uoss_ptr)  {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(*uoss_ptr)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}
291 | |||
/*
 * Return from a compat rt signal handler: restore the signal mask and
 * machine state saved in the compat_rt_sigframe at the current stack
 * pointer.  Any unreadable or corrupt frame kills the task with SIGSEGV.
 */
long _compat_sys_rt_sigreturn(struct pt_regs *regs)
{
	struct compat_rt_sigframe __user *frame =
		(struct compat_rt_sigframe __user *) compat_ptr(regs->sp);
	sigset_t set;
	long r0;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	/* Reinstate the saved mask, never blocking SIGKILL/SIGSTOP. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* r0 is filled in from the saved context by restore_sigcontext(). */
	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
		goto badframe;

	if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0)
		goto badframe;

	return r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
322 | |||
323 | /* | ||
324 | * Determine which stack to use.. | ||
325 | */ | ||
326 | static inline void __user *compat_get_sigframe(struct k_sigaction *ka, | ||
327 | struct pt_regs *regs, | ||
328 | size_t frame_size) | ||
329 | { | ||
330 | unsigned long sp; | ||
331 | |||
332 | /* Default to using normal stack */ | ||
333 | sp = (unsigned long)compat_ptr(regs->sp); | ||
334 | |||
335 | /* | ||
336 | * If we are on the alternate signal stack and would overflow | ||
337 | * it, don't. Return an always-bogus address instead so we | ||
338 | * will die with SIGSEGV. | ||
339 | */ | ||
340 | if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) | ||
341 | return (void __user __force *)-1UL; | ||
342 | |||
343 | /* This is the X/Open sanctioned signal stack switching. */ | ||
344 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
345 | if (sas_ss_flags(sp) == 0) | ||
346 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
347 | } | ||
348 | |||
349 | sp -= frame_size; | ||
350 | /* | ||
351 | * Align the stack pointer according to the TILE ABI, | ||
352 | * i.e. so that on function entry (sp & 15) == 0. | ||
353 | */ | ||
354 | sp &= -16UL; | ||
355 | return (void __user *) sp; | ||
356 | } | ||
357 | |||
/*
 * Build a compat rt signal frame on the user stack and redirect the
 * registers so that userspace resumes in the signal handler.
 * Returns 0 on success; on any fault, forces SIGSEGV and returns
 * -EFAULT.
 */
int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs)
{
	unsigned long restorer;
	struct compat_rt_sigframe __user *frame;
	int err = 0;
	int usig;

	frame = compat_get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Remap the signal number through the exec domain, if one applies. */
	usig = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	/* Always write at least the signal number for the stack backtracer. */
	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* At sigreturn time, restore the callee-save registers too. */
		err |= copy_siginfo_to_user32(&frame->info, info);
		regs->flags |= PT_FLAGS_RESTORE_REGS;
	} else {
		err |= __put_user(info->si_signo, &frame->info.si_signo);
	}

	/* Create the ucontext.  */
	err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(ptr_to_compat((void *)(current->sas_ss_sp)),
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/* Return via VDSO_BASE unless the caller supplied SA_RESTORER. */
	restorer = VDSO_BASE;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ptr_to_compat_reg(ka->sa.sa_restorer);

	/*
	 * Set up registers for signal handler.
	 * Registers that we don't modify keep the value they had from
	 * user-space at the time we took the signal.
	 */
	regs->pc = ptr_to_compat_reg(ka->sa.sa_handler);
	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
	regs->sp = ptr_to_compat_reg(frame);
	regs->lr = restorer;
	regs->regs[0] = (unsigned long) usig;

	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* Need extra arguments, so mark to restore caller-saves. */
		regs->regs[1] = ptr_to_compat_reg(&frame->info);
		regs->regs[2] = ptr_to_compat_reg(&frame->uc);
		regs->flags |= PT_FLAGS_CALLER_SAVES;
	}

	/*
	 * Notify any tracer that was single-stepping it.
	 * The tracer may want to single-step inside the
	 * handler too.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		ptrace_notify(SIGTRAP);

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c new file mode 100644 index 000000000000..2c54fd43a8a0 --- /dev/null +++ b/arch/tile/kernel/early_printk.c | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/console.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <asm/setup.h> | ||
20 | #include <hv/hypervisor.h> | ||
21 | |||
/* Emit bytes straight to the hypervisor console; "con" is unused. */
static void early_hv_write(struct console *con, const char *s, unsigned n)
{
	hv_console_write((HV_VirtAddr) s, n);
}

static struct console early_hv_console = {
	.name =		"earlyhv",
	.write =	early_hv_write,
	.flags =	CON_PRINTBUFFER,
	.index =	-1,
};

/* Direct interface for emergencies */
static struct console *early_console = &early_hv_console;
static int early_console_initialized;	/* set by setup_early_printk() */
static int early_console_complete;	/* set by disable_early_printk() */
38 | |||
39 | static void early_vprintk(const char *fmt, va_list ap) | ||
40 | { | ||
41 | char buf[512]; | ||
42 | int n = vscnprintf(buf, sizeof(buf), fmt, ap); | ||
43 | early_console->write(early_console, buf, n); | ||
44 | } | ||
45 | |||
/* printf()-style output routed through early_vprintk(). */
void early_printk(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	early_vprintk(fmt, ap);
	va_end(ap);
}
53 | |||
/*
 * Early-boot panic: disable interrupts, print the panic message and a
 * stack dump to the early console, then stop by calling hv_halt().
 */
void early_panic(const char *fmt, ...)
{
	va_list ap;
	raw_local_irq_disable_all();	/* stop anything interleaving output */
	va_start(ap, fmt);
	early_printk("Kernel panic - not syncing: ");
	early_vprintk(fmt, ap);
	early_console->write(early_console, "\n", 1);
	va_end(ap);
	dump_stack();
	hv_halt();
}
66 | |||
67 | static int __initdata keep_early; | ||
68 | |||
69 | static int __init setup_early_printk(char *str) | ||
70 | { | ||
71 | if (early_console_initialized) | ||
72 | return 1; | ||
73 | |||
74 | if (str != NULL && strncmp(str, "keep", 4) == 0) | ||
75 | keep_early = 1; | ||
76 | |||
77 | early_console = &early_hv_console; | ||
78 | early_console_initialized = 1; | ||
79 | register_console(early_console); | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | void __init disable_early_printk(void) | ||
85 | { | ||
86 | early_console_complete = 1; | ||
87 | if (!early_console_initialized || !early_console) | ||
88 | return; | ||
89 | if (!keep_early) { | ||
90 | early_printk("disabling early console\n"); | ||
91 | unregister_console(early_console); | ||
92 | early_console_initialized = 0; | ||
93 | } else { | ||
94 | early_printk("keeping early console\n"); | ||
95 | } | ||
96 | } | ||
97 | |||
/*
 * Warn the user when the machine shuts down before the real console
 * ever came up and no early console was requested.  No-op once either
 * console phase has happened.
 */
void warn_early_printk(void)
{
	if (early_console_complete || early_console_initialized)
		return;
	early_printk("\
Machine shutting down before console output is fully initialized.\n\
You may wish to reboot and add the option 'earlyprintk' to your\n\
boot command line to see any diagnostic early console output.\n\
");
}
108 | |||
109 | early_param("earlyprintk", setup_early_printk); | ||
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S new file mode 100644 index 000000000000..3d01383b1b0e --- /dev/null +++ b/arch/tile/kernel/entry.S | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <linux/unistd.h> | ||
17 | #include <asm/irqflags.h> | ||
18 | #include <arch/abi.h> | ||
19 | |||
#ifdef __tilegx__
/* tilegx spells the "branch non-zero taken" opcode differently. */
#define bnzt bnezt
#endif

/* Return the caller's address (lr) as "the current text address". */
STD_ENTRY(current_text_addr)
	{ move r0, lr; jrp lr }
	STD_ENDPROC(current_text_addr)
27 | |||
/* Issue a magic simulator syscall: r0 is written to SPR SIM_CONTROL. */
STD_ENTRY(_sim_syscall)
	/*
	 * Wait for r0-r9 to be ready (and lr on the off chance we
	 * want the syscall to locate its caller), then make a magic
	 * simulator syscall.
	 *
	 * We carefully stall until the registers are readable in case they
	 * are the target of a slow load, etc. so that tile-sim will
	 * definitely be able to read all of them inside the magic syscall.
	 *
	 * Technically this is wrong for r3-r9 and lr, since an interrupt
	 * could come in and restore the registers with a slow load right
	 * before executing the mtspr.  We may need to modify tile-sim to
	 * explicitly stall for this case, but we do not yet have
	 * a way to implement such a stall.
	 */
	/* "and zero, x, y" reads x and y purely to force the stall. */
	{ and zero, lr, r9 ; and zero, r8, r7 }
	{ and zero, r6, r5 ; and zero, r4, r3 }
	{ and zero, r2, r1 ; mtspr SIM_CONTROL, r0 }
	{ jrp lr }
	STD_ENDPROC(_sim_syscall)
49 | |||
/*
 * Implement execve().  The i386 code has a note that forking from kernel
 * space results in no copy on write until the execve, so we should be
 * careful not to write to the stack here.
 */
STD_ENTRY(kernel_execve)
	moveli TREG_SYSCALL_NR_NAME, __NR_execve	/* select execve */
	swint1						/* trap to syscall path */
	jrp lr
	STD_ENDPROC(kernel_execve)
60 | |||
/* Delay a fixed number of cycles. */
STD_ENTRY(__delay)
	/* Spin: decrement r0 and branch back to this bundle until zero. */
	{ addi r0, r0, -1; bnzt r0, . }
	jrp lr
	STD_ENDPROC(__delay)
66 | |||
/*
 * We don't run this function directly, but instead copy it to a page
 * we map into every user process.  See vdso_setup().
 *
 * Note that libc has a copy of this function that it uses to compare
 * against the PC when a stack backtrace ends, so if this code is
 * changed, the libc implementation(s) should also be updated.
 */
.pushsection .data
ENTRY(__rt_sigreturn)
	/* Invoke the rt_sigreturn syscall from the signal trampoline. */
	moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn
	swint1
	ENDPROC(__rt_sigreturn)
	/* End marker so the copy in vdso_setup() knows how much to take. */
	ENTRY(__rt_sigreturn_end)
.popsection
82 | |||
/* Gather pc (r1), lr (r2), sp (r3), r52 (r4) and jump to _dump_stack. */
STD_ENTRY(dump_stack)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, dump_stack - . }	/* r1 = our address */
	{ move r3, sp; j _dump_stack }
	jrp lr   /* keep backtracer happy */
	STD_ENDPROC(dump_stack)
89 | |||
/* Same register-gathering shim as dump_stack, but for the C routine
   _KBacktraceIterator_init_current. */
STD_ENTRY(KBacktraceIterator_init_current)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
	{ move r3, sp; j _KBacktraceIterator_init_current }
	jrp lr   /* keep backtracer happy */
	STD_ENDPROC(KBacktraceIterator_init_current)
96 | |||
/*
 * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
 * free the old stack (passed in r0) and re-invoke cpu_idle().
 * We update sp and ksp0 simultaneously to avoid backtracer warnings.
 */
STD_ENTRY(cpu_idle_on_new_stack)
	{
	 move sp, r1
	 mtspr SYSTEM_SAVE_1_0, r2	/* ksp0 lives in this SPR */
	}
	jal free_thread_info		/* release the old stack (r0) */
	j cpu_idle
	STD_ENDPROC(cpu_idle_on_new_stack)
110 | |||
/* Loop forever on a nap during SMP boot. */
STD_ENTRY(smp_nap)
	nap
	j smp_nap /* we are not architecturally guaranteed not to exit nap */
	jrp lr /* clue in the backtracer */
	STD_ENDPROC(smp_nap)
117 | |||
/*
 * Enable interrupts racelessly and then nap until interrupted.
 * This function's _cpu_idle_nap address is special; see intvec.S.
 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
 * as a result return to the function that called _cpu_idle().
 */
STD_ENTRY(_cpu_idle)
	{
	 lnk r0
	 movei r1, 1
	}
	{
	 addli r0, r0, _cpu_idle_nap - .	/* r0 = &_cpu_idle_nap */
	 mtspr INTERRUPT_CRITICAL_SECTION, r1
	}
	IRQ_ENABLE(r2, r3)		/* unmask, but still with ICS set */
	mtspr EX_CONTEXT_1_1, r1	/* PL1, ICS clear */
	mtspr EX_CONTEXT_1_0, r0
	iret				/* "return" to _cpu_idle_nap below */
	.global _cpu_idle_nap
_cpu_idle_nap:
	nap
	jrp lr
	STD_ENDPROC(_cpu_idle)
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c new file mode 100644 index 000000000000..584b965dc824 --- /dev/null +++ b/arch/tile/kernel/hardwall.c | |||
@@ -0,0 +1,796 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/fs.h> | ||
16 | #include <linux/proc_fs.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | #include <linux/rwsem.h> | ||
19 | #include <linux/kprobes.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/hardirq.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/cdev.h> | ||
25 | #include <linux/compat.h> | ||
26 | #include <asm/hardwall.h> | ||
27 | #include <asm/traps.h> | ||
28 | #include <asm/siginfo.h> | ||
29 | #include <asm/irq_regs.h> | ||
30 | |||
31 | #include <arch/interrupts.h> | ||
32 | #include <arch/spr_def.h> | ||
33 | |||
34 | |||
35 | /* | ||
36 | * This data structure tracks the rectangle data, etc., associated | ||
37 | * one-to-one with a "struct file *" from opening HARDWALL_FILE. | ||
38 | * Note that the file's private data points back to this structure. | ||
39 | */ | ||
struct hardwall_info {
	struct list_head list;             /* link on the global "rectangles" list */
	struct list_head task_head;        /* head of tasks activated in this hardwall */
	int ulhc_x;                        /* upper left hand corner x coord */
	int ulhc_y;                        /* upper left hand corner y coord */
	int width;                         /* rectangle width */
	int height;                        /* rectangle height */
	int teardown_in_progress;          /* are we tearing this one down?
					    * (set under hardwall_lock) */
};
49 | |||
50 | /* Currently allocated hardwall rectangles */ | ||
51 | static LIST_HEAD(rectangles); | ||
52 | |||
53 | /* | ||
54 | * Guard changes to the hardwall data structures. | ||
55 | * This could be finer grained (e.g. one lock for the list of hardwall | ||
56 | * rectangles, then separate embedded locks for each one's list of tasks), | ||
57 | * but there are subtle correctness issues when trying to start with | ||
58 | * a task's "hardwall" pointer and lock the correct rectangle's embedded | ||
59 | * lock in the presence of a simultaneous deactivation, so it seems | ||
60 | * easier to have a single lock, given that none of these data | ||
61 | * structures are touched very frequently during normal operation. | ||
62 | */ | ||
63 | static DEFINE_SPINLOCK(hardwall_lock); | ||
64 | |||
/* Allow disabling UDN access. */
static int udn_disabled;

/*
 * Early boot parameter "noudn": disable user-space UDN access entirely.
 * With this set, hardwall creation returns -ENOSYS and network state
 * resets are skipped.
 */
static int __init noudn(char *str)
{
	pr_info("User-space UDN access is disabled\n");
	udn_disabled = 1;
	return 0;
}
early_param("noudn", noudn);
74 | |||
75 | |||
76 | /* | ||
77 | * Low-level primitives | ||
78 | */ | ||
79 | |||
/*
 * Set a CPU bit if the CPU is online.  Used when walking rectangle
 * coordinates, which may name cpus that are not currently online.
 */
#define cpu_online_set(cpu, dst) do { \
	if (cpu_online(cpu)) \
		cpumask_set_cpu(cpu, dst); \
} while (0)
85 | |||
86 | |||
87 | /* Does the given rectangle contain the given x,y coordinate? */ | ||
88 | static int contains(struct hardwall_info *r, int x, int y) | ||
89 | { | ||
90 | return (x >= r->ulhc_x && x < r->ulhc_x + r->width) && | ||
91 | (y >= r->ulhc_y && y < r->ulhc_y + r->height); | ||
92 | } | ||
93 | |||
/*
 * Compute the rectangle parameters from the given cpumask and validate
 * that the mask is exactly a rectangle of cpus.
 * Returns 0 on success or -EINVAL if the mask is not a valid rectangle.
 */
static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
{
	int x, y, cpu, ulhc, lrhc;

	/* The first cpu is the ULHC, the last the LRHC. */
	ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
	lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);

	/* Compute the rectangle attributes from the cpus. */
	r->ulhc_x = cpu_x(ulhc);
	r->ulhc_y = cpu_y(ulhc);
	r->width = cpu_x(lrhc) - r->ulhc_x + 1;
	r->height = cpu_y(lrhc) - r->ulhc_y + 1;

	/* Width and height must be positive. */
	if (r->width <= 0 || r->height <= 0)
		return -EINVAL;

	/*
	 * Confirm that the cpumask is exactly the rectangle:
	 * every cpu is either both in the mask and in the rectangle,
	 * or in neither.
	 */
	for (y = 0, cpu = 0; y < smp_height; ++y)
		for (x = 0; x < smp_width; ++x, ++cpu)
			if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
				return -EINVAL;

	/*
	 * Note that offline cpus can't be drained when this UDN
	 * rectangle eventually closes.  We used to detect this
	 * situation and print a warning, but it annoyed users and
	 * they ignored it anyway, so now we just return without a
	 * warning.
	 */
	return 0;
}
128 | |||
129 | /* Do the two given rectangles overlap on any cpu? */ | ||
130 | static int overlaps(struct hardwall_info *a, struct hardwall_info *b) | ||
131 | { | ||
132 | return a->ulhc_x + a->width > b->ulhc_x && /* A not to the left */ | ||
133 | b->ulhc_x + b->width > a->ulhc_x && /* B not to the left */ | ||
134 | a->ulhc_y + a->height > b->ulhc_y && /* A not above */ | ||
135 | b->ulhc_y + b->height > a->ulhc_y; /* B not above */ | ||
136 | } | ||
137 | |||
138 | |||
139 | /* | ||
140 | * Hardware management of hardwall setup, teardown, trapping, | ||
141 | * and enabling/disabling PL0 access to the networks. | ||
142 | */ | ||
143 | |||
/* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */
enum direction_protect {
	N_PROTECT = (1 << 0),	/* north direction bit */
	E_PROTECT = (1 << 1),	/* east direction bit */
	S_PROTECT = (1 << 2),	/* south direction bit */
	W_PROTECT = (1 << 3)	/* west direction bit */
};
151 | |||
/* Unmask the UDN firewall interrupt on this cpu. */
static void enable_firewall_interrupts(void)
{
	raw_local_irq_unmask_now(INT_UDN_FIREWALL);
}
156 | |||
/* Mask the UDN firewall interrupt on this cpu. */
static void disable_firewall_interrupts(void)
{
	raw_local_irq_mask_now(INT_UDN_FIREWALL);
}
161 | |||
162 | /* Set up hardwall on this cpu based on the passed hardwall_info. */ | ||
163 | static void hardwall_setup_ipi_func(void *info) | ||
164 | { | ||
165 | struct hardwall_info *r = info; | ||
166 | int cpu = smp_processor_id(); | ||
167 | int x = cpu % smp_width; | ||
168 | int y = cpu / smp_width; | ||
169 | int bits = 0; | ||
170 | if (x == r->ulhc_x) | ||
171 | bits |= W_PROTECT; | ||
172 | if (x == r->ulhc_x + r->width - 1) | ||
173 | bits |= E_PROTECT; | ||
174 | if (y == r->ulhc_y) | ||
175 | bits |= N_PROTECT; | ||
176 | if (y == r->ulhc_y + r->height - 1) | ||
177 | bits |= S_PROTECT; | ||
178 | BUG_ON(bits == 0); | ||
179 | __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits); | ||
180 | enable_firewall_interrupts(); | ||
181 | |||
182 | } | ||
183 | |||
/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
static void hardwall_setup(struct hardwall_info *r)
{
	int x, y, cpu, delta;
	struct cpumask rect_cpus;

	cpumask_clear(&rect_cpus);

	/* First include the top and bottom edges */
	cpu = r->ulhc_y * smp_width + r->ulhc_x;
	delta = (r->height - 1) * smp_width;
	for (x = 0; x < r->width; ++x, ++cpu) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then the left and right edges (corners are set twice; harmless). */
	cpu -= r->width;	/* rewind cpu to the rectangle's ULHC */
	delta = r->width - 1;
	for (y = 0; y < r->height; ++y, cpu += smp_width) {
		cpu_online_set(cpu, &rect_cpus);
		cpu_online_set(cpu + delta, &rect_cpus);
	}

	/* Then tell all the cpus to set up their protection SPR */
	on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
}
211 | |||
/*
 * Interrupt handler for a UDN firewall violation on this tile: find
 * the rectangle containing this cpu, mark it as being torn down, and
 * send SIGILL to every process activated in it.
 */
void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
{
	struct hardwall_info *rect;
	struct task_struct *p;
	struct siginfo info;
	int x, y;
	int cpu = smp_processor_id();
	int found_processes;
	unsigned long flags;

	struct pt_regs *old_regs = set_irq_regs(regs);
	irq_enter();

	/* This tile trapped a network access; find the rectangle. */
	x = cpu % smp_width;
	y = cpu / smp_width;
	spin_lock_irqsave(&hardwall_lock, flags);
	list_for_each_entry(rect, &rectangles, list) {
		if (contains(rect, x, y))
			break;
	}

	/*
	 * It shouldn't be possible not to find this cpu on the
	 * rectangle list, since only cpus in rectangles get hardwalled.
	 * The hardwall is only removed after the UDN is drained.
	 */
	BUG_ON(&rect->list == &rectangles);

	/*
	 * If we already started teardown on this hardwall, don't worry;
	 * the abort signal has been sent and we are just waiting for things
	 * to quiesce.
	 */
	if (rect->teardown_in_progress) {
		pr_notice("cpu %d: detected hardwall violation %#lx"
		       " while teardown already in progress\n",
		       cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
		goto done;
	}

	/*
	 * Kill off any process that is activated in this rectangle.
	 * We bypass security to deliver the signal, since it must be
	 * one of the activated processes that generated the UDN
	 * message that caused this trap, and all the activated
	 * processes shared a single open file so are pretty tightly
	 * bound together from a security point of view to begin with.
	 */
	rect->teardown_in_progress = 1;
	wmb(); /* Ensure visibility of rectangle before notifying processes. */
	pr_notice("cpu %d: detected hardwall violation %#lx...\n",
	       cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_HARDWALL;
	found_processes = 0;
	list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
		BUG_ON(p->thread.hardwall != rect);
		if (p->sighand) {
			found_processes = 1;
			pr_notice("hardwall: killing %d\n", p->pid);
			/* Deliver SIGILL directly, holding the siglock. */
			spin_lock(&p->sighand->siglock);
			__group_send_sig_info(info.si_signo, &info, p);
			spin_unlock(&p->sighand->siglock);
		}
	}
	if (!found_processes)
		pr_notice("hardwall: no associated processes!\n");

 done:
	spin_unlock_irqrestore(&hardwall_lock, flags);

	/*
	 * We have to disable firewall interrupts now, or else when we
	 * return from this handler, we will simply re-interrupt back to
	 * it. However, we can't clear the protection bits, since we
	 * haven't yet drained the network, and that would allow packets
	 * to cross out of the hardwall region.
	 */
	disable_firewall_interrupts();

	irq_exit();
	set_irq_regs(old_regs);
}
297 | |||
/* Allow access from user space to the UDN. */
void grant_network_mpls(void)
{
	/*
	 * NOTE(review): the "_SET_0" SPR writes appear to route these
	 * UDN MPLs so that user space can use the network (see the
	 * "_SET_1" counterparts in restrict_network_mpls()) -- confirm
	 * against the tile architecture manual.
	 */
	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
	/* Pre-rev1 XDN chips have two additional UDN MPLs. */
	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
#endif
}
310 | |||
/* Deny access from user space to the UDN. */
void restrict_network_mpls(void)
{
	/*
	 * NOTE(review): the "_SET_1" SPR writes appear to route these
	 * UDN MPLs away from user space (inverse of the "_SET_0" writes
	 * in grant_network_mpls()) -- confirm against the tile
	 * architecture manual.
	 */
	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
	/* Pre-rev1 XDN chips have two additional UDN MPLs. */
	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
#endif
}
323 | |||
324 | |||
325 | /* | ||
326 | * Code to create, activate, deactivate, and destroy hardwall rectangles. | ||
327 | */ | ||
328 | |||
329 | /* Create a hardwall for the given rectangle */ | ||
330 | static struct hardwall_info *hardwall_create( | ||
331 | size_t size, const unsigned char __user *bits) | ||
332 | { | ||
333 | struct hardwall_info *iter, *rect; | ||
334 | struct cpumask mask; | ||
335 | unsigned long flags; | ||
336 | int rc; | ||
337 | |||
338 | /* Reject crazy sizes out of hand, a la sys_mbind(). */ | ||
339 | if (size > PAGE_SIZE) | ||
340 | return ERR_PTR(-EINVAL); | ||
341 | |||
342 | /* Copy whatever fits into a cpumask. */ | ||
343 | if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size))) | ||
344 | return ERR_PTR(-EFAULT); | ||
345 | |||
346 | /* | ||
347 | * If the size was short, clear the rest of the mask; | ||
348 | * otherwise validate that the rest of the user mask was zero | ||
349 | * (we don't try hard to be efficient when validating huge masks). | ||
350 | */ | ||
351 | if (size < sizeof(struct cpumask)) { | ||
352 | memset((char *)&mask + size, 0, sizeof(struct cpumask) - size); | ||
353 | } else if (size > sizeof(struct cpumask)) { | ||
354 | size_t i; | ||
355 | for (i = sizeof(struct cpumask); i < size; ++i) { | ||
356 | char c; | ||
357 | if (get_user(c, &bits[i])) | ||
358 | return ERR_PTR(-EFAULT); | ||
359 | if (c) | ||
360 | return ERR_PTR(-EINVAL); | ||
361 | } | ||
362 | } | ||
363 | |||
364 | /* Allocate a new rectangle optimistically. */ | ||
365 | rect = kmalloc(sizeof(struct hardwall_info), | ||
366 | GFP_KERNEL | __GFP_ZERO); | ||
367 | if (rect == NULL) | ||
368 | return ERR_PTR(-ENOMEM); | ||
369 | INIT_LIST_HEAD(&rect->task_head); | ||
370 | |||
371 | /* Compute the rectangle size and validate that it's plausible. */ | ||
372 | rc = setup_rectangle(rect, &mask); | ||
373 | if (rc != 0) { | ||
374 | kfree(rect); | ||
375 | return ERR_PTR(rc); | ||
376 | } | ||
377 | |||
378 | /* Confirm it doesn't overlap and add it to the list. */ | ||
379 | spin_lock_irqsave(&hardwall_lock, flags); | ||
380 | list_for_each_entry(iter, &rectangles, list) { | ||
381 | if (overlaps(iter, rect)) { | ||
382 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
383 | kfree(rect); | ||
384 | return ERR_PTR(-EBUSY); | ||
385 | } | ||
386 | } | ||
387 | list_add_tail(&rect->list, &rectangles); | ||
388 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
389 | |||
390 | /* Set up appropriate hardwalling on all affected cpus. */ | ||
391 | hardwall_setup(rect); | ||
392 | |||
393 | return rect; | ||
394 | } | ||
395 | |||
/*
 * Activate a given hardwall on this cpu for this process.
 * Returns 0 on success (or if already activated on this hardwall),
 * -ENODATA if no rectangle has been created on this file,
 * -EINVAL if the rectangle is being torn down or this cpu is outside it,
 * -EPERM if the task is not bound to exactly one cpu.
 */
static int hardwall_activate(struct hardwall_info *rect)
{
	int cpu, x, y;
	unsigned long flags;
	struct task_struct *p = current;
	struct thread_struct *ts = &p->thread;

	/* Require a rectangle. */
	if (rect == NULL)
		return -ENODATA;

	/* Not allowed to activate a rectangle that is being torn down. */
	if (rect->teardown_in_progress)
		return -EINVAL;

	/*
	 * Get our affinity; if we're not bound to this tile uniquely,
	 * we can't access the network registers.
	 */
	if (cpumask_weight(&p->cpus_allowed) != 1)
		return -EPERM;

	/* Make sure we are bound to a cpu in this rectangle. */
	cpu = smp_processor_id();
	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
	x = cpu_x(cpu);
	y = cpu_y(cpu);
	if (!contains(rect, x, y))
		return -EINVAL;

	/* If we are already bound to this hardwall, it's a no-op. */
	if (ts->hardwall) {
		BUG_ON(ts->hardwall != rect);
		return 0;
	}

	/* Success!  This process gets to use the user networks on this cpu. */
	ts->hardwall = rect;
	spin_lock_irqsave(&hardwall_lock, flags);
	list_add(&ts->hardwall_list, &rect->task_head);
	spin_unlock_irqrestore(&hardwall_lock, flags);
	grant_network_mpls();
	printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
	       p->pid, p->comm, cpu);
	return 0;
}
443 | |||
/*
 * Deactivate a task's hardwall.  Must hold hardwall_lock.
 * This method may be called from free_task(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
 * We assume the cpus_allowed, pid, and comm fields are still valid.
 * Unlinks the task from its rectangle's task list and, for the
 * current task, revokes user-space UDN access.
 */
static void _hardwall_deactivate(struct task_struct *task)
{
	struct thread_struct *ts = &task->thread;

	/* A task with networks granted must be bound to a single cpu. */
	if (cpumask_weight(&task->cpus_allowed) != 1) {
		pr_err("pid %d (%s) releasing networks with"
		       " an affinity mask containing %d cpus!\n",
		       task->pid, task->comm,
		       cpumask_weight(&task->cpus_allowed));
		BUG();
	}

	BUG_ON(ts->hardwall == NULL);
	ts->hardwall = NULL;
	list_del(&ts->hardwall_list);
	/* Only revoke the MPLs when running on the task's own cpu. */
	if (task == current)
		restrict_network_mpls();
}
468 | |||
/*
 * Deactivate a task's hardwall (locking wrapper around
 * _hardwall_deactivate()).  Returns 0 on success, or -EINVAL if the
 * task had no active hardwall.
 */
int hardwall_deactivate(struct task_struct *task)
{
	unsigned long flags;
	int activated;

	spin_lock_irqsave(&hardwall_lock, flags);
	activated = (task->thread.hardwall != NULL);
	if (activated)
		_hardwall_deactivate(task);
	spin_unlock_irqrestore(&hardwall_lock, flags);

	if (!activated)
		return -EINVAL;

	printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
	       task->pid, task->comm, smp_processor_id());
	return 0;
}
488 | |||
/*
 * Stop a UDN switch before draining the network.
 * No-op on rev1 XDN chips (the #if body is compiled out).
 */
static void stop_udn_switch(void *ignored)
{
#if !CHIP_HAS_REV1_XDN()
	/* Freeze the switch and the demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE,
		     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
#endif
}
500 | |||
/*
 * Drain all the state from a stopped switch, discarding any pending
 * words so no packets can leak out of the closing hardwall.
 * No-op on rev1 XDN chips (the #if body is compiled out).
 */
static void drain_udn_switch(void *ignored)
{
#if !CHIP_HAS_REV1_XDN()
	int i;
	int from_tile_words, ca_count;

	/* Empty out the 5 switch point fifos. */
	for (i = 0; i < 5; i++) {
		int words, j;
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		/* Low 4 bits of SP_STATE hold the fifo word count. */
		words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
		for (j = 0; j < words; j++)
			(void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
		BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
	}

	/* Dump out the 3 word fifo at top. */
	from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
	for (i = 0; i < from_tile_words; i++)
		(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);

	/* Empty out demuxes (one per UDN receive queue). */
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
		(void) __tile_udn0_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
		(void) __tile_udn1_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
		(void) __tile_udn2_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
		(void) __tile_udn3_receive();
	BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);

	/* Empty out catch all. */
	ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
	for (i = 0; i < ca_count; i++)
		(void) __insn_mfspr(SPR_UDN_CA_DATA);
	BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);

	/* Clear demux logic. */
	__insn_mtspr(SPR_UDN_DEMUX_CTL, 1);

	/*
	 * Write switch state; experimentation indicates that 0xc3000
	 * is an idle switch point.
	 */
	for (i = 0; i < 5; i++) {
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		__insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
	}
#endif
}
553 | |||
/*
 * Reset random UDN state registers at boot up and during hardwall
 * teardown.  Does nothing when UDN access is disabled via "noudn".
 */
void reset_network_state(void)
{
#if !CHIP_HAS_REV1_XDN()
	/* Reset UDN coordinates to their standard value */
	unsigned int cpu = smp_processor_id();
	unsigned int x = cpu % smp_width;
	unsigned int y = cpu / smp_width;
#endif

	if (udn_disabled)
		return;

#if !CHIP_HAS_REV1_XDN()
	/* NOTE(review): x/y field offsets (18/7) taken as given -- confirm
	 * against the SPR_UDN_TILE_COORD layout in the chip manual. */
	__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));

	/* Set demux tags to predefined values and enable them. */
	__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
	__insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
	__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
	__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
	__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
#endif

	/* Clear out other random registers so we have a clean slate. */
	__insn_mtspr(SPR_UDN_AVAIL_EN, 0);
	__insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
#if !CHIP_HAS_REV1_XDN()
	__insn_mtspr(SPR_UDN_REFILL_EN, 0);
	__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
	__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
#endif

	/* Start the switch and demux. */
#if !CHIP_HAS_REV1_XDN()
	__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}
592 | |||
/* Restart a UDN switch after draining. */
static void restart_udn_switch(void *ignored)
{
	reset_network_state();

	/* Clear the protection bits, then disable firewall interrupts. */
	__insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
	disable_firewall_interrupts();
}
602 | |||
603 | /* Build a struct cpumask containing all valid tiles in bounding rectangle. */ | ||
604 | static void fill_mask(struct hardwall_info *r, struct cpumask *result) | ||
605 | { | ||
606 | int x, y, cpu; | ||
607 | |||
608 | cpumask_clear(result); | ||
609 | |||
610 | cpu = r->ulhc_y * smp_width + r->ulhc_x; | ||
611 | for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) { | ||
612 | for (x = 0; x < r->width; ++x, ++cpu) | ||
613 | cpu_online_set(cpu, result); | ||
614 | } | ||
615 | } | ||
616 | |||
617 | /* Last reference to a hardwall is gone, so clear the network. */ | ||
618 | static void hardwall_destroy(struct hardwall_info *rect) | ||
619 | { | ||
620 | struct task_struct *task; | ||
621 | unsigned long flags; | ||
622 | struct cpumask mask; | ||
623 | |||
624 | /* Make sure this file actually represents a rectangle. */ | ||
625 | if (rect == NULL) | ||
626 | return; | ||
627 | |||
628 | /* | ||
629 | * Deactivate any remaining tasks. It's possible to race with | ||
630 | * some other thread that is exiting and hasn't yet called | ||
631 | * deactivate (when freeing its thread_info), so we carefully | ||
632 | * deactivate any remaining tasks before freeing the | ||
633 | * hardwall_info object itself. | ||
634 | */ | ||
635 | spin_lock_irqsave(&hardwall_lock, flags); | ||
636 | list_for_each_entry(task, &rect->task_head, thread.hardwall_list) | ||
637 | _hardwall_deactivate(task); | ||
638 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
639 | |||
640 | /* Drain the UDN. */ | ||
641 | printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n", | ||
642 | rect->width, rect->height, rect->ulhc_x, rect->ulhc_y); | ||
643 | fill_mask(rect, &mask); | ||
644 | on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1); | ||
645 | on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1); | ||
646 | |||
647 | /* Restart switch and disable firewall. */ | ||
648 | on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1); | ||
649 | |||
650 | /* Now free the rectangle from the list. */ | ||
651 | spin_lock_irqsave(&hardwall_lock, flags); | ||
652 | BUG_ON(!list_empty(&rect->task_head)); | ||
653 | list_del(&rect->list); | ||
654 | spin_unlock_irqrestore(&hardwall_lock, flags); | ||
655 | kfree(rect); | ||
656 | } | ||
657 | |||
658 | |||
659 | /* | ||
660 | * Dump hardwall state via /proc; initialized in arch/tile/sys/proc.c. | ||
661 | */ | ||
/*
 * Emit one line per rectangle: "WxH X,Y pids:" followed by each
 * activated pid as "pid@x,y".  With UDN disabled, print a single
 * whole-chip line with no pids.
 */
int proc_tile_hardwall_show(struct seq_file *sf, void *v)
{
	struct hardwall_info *r;

	if (udn_disabled) {
		seq_printf(sf, "%dx%d 0,0 pids:\n", smp_width, smp_height);
		return 0;
	}

	spin_lock_irq(&hardwall_lock);
	list_for_each_entry(r, &rectangles, list) {
		struct task_struct *p;
		seq_printf(sf, "%dx%d %d,%d pids:",
			   r->width, r->height, r->ulhc_x, r->ulhc_y);
		list_for_each_entry(p, &r->task_head, thread.hardwall_list) {
			/* Activated tasks are bound to a single cpu. */
			unsigned int cpu = cpumask_first(&p->cpus_allowed);
			unsigned int x = cpu % smp_width;
			unsigned int y = cpu / smp_width;
			seq_printf(sf, " %d@%d,%d", p->pid, x, y);
		}
		seq_printf(sf, "\n");
	}
	spin_unlock_irq(&hardwall_lock);
	return 0;
}
687 | |||
688 | |||
689 | /* | ||
690 | * Character device support via ioctl/close. | ||
691 | */ | ||
692 | |||
/*
 * ioctl dispatcher for the hardwall device: create a rectangle,
 * activate it for the calling task, or deactivate the calling task.
 * "a" is the ioctl command, "b" the user pointer argument.
 */
static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
{
	struct hardwall_info *rect = file->private_data;

	if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
		return -EINVAL;

	switch (_IOC_NR(a)) {
	case _HARDWALL_CREATE:
		if (udn_disabled)
			return -ENOSYS;
		/* Only one rectangle per open file. */
		if (rect != NULL)
			return -EALREADY;
		rect = hardwall_create(_IOC_SIZE(a),
				       (const unsigned char __user *)b);
		if (IS_ERR(rect))
			return PTR_ERR(rect);
		file->private_data = rect;
		return 0;

	case _HARDWALL_ACTIVATE:
		return hardwall_activate(rect);

	case _HARDWALL_DEACTIVATE:
		/* Only deactivate the hardwall this file actually holds. */
		if (current->thread.hardwall != rect)
			return -EINVAL;
		return hardwall_deactivate(current);

	default:
		return -EINVAL;
	}
}
725 | |||
#ifdef CONFIG_COMPAT
/* 32-bit compat wrapper: convert the user pointer, then dispatch. */
static long hardwall_compat_ioctl(struct file *file,
				  unsigned int a, unsigned long b)
{
	/* Sign-extend the argument so it can be used as a pointer. */
	return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
}
#endif
734 | |||
/* The user process closed the file; revoke access to user networks. */
static int hardwall_flush(struct file *file, fl_owner_t owner)
{
	struct hardwall_info *rect = file->private_data;
	struct task_struct *task, *tmp;
	unsigned long flags;

	if (rect) {
		/*
		 * NOTE: if multiple threads are activated on this hardwall
		 * file, the other threads will continue having access to the
		 * UDN until they are context-switched out and back in again.
		 *
		 * NOTE: A NULL files pointer means the task is being torn
		 * down, so in that case we also deactivate it.
		 *
		 * Use the _safe iterator since _hardwall_deactivate()
		 * unlinks each task from the list as we go.
		 */
		spin_lock_irqsave(&hardwall_lock, flags);
		list_for_each_entry_safe(task, tmp, &rect->task_head,
					 thread.hardwall_list) {
			if (task->files == owner || task->files == NULL)
				_hardwall_deactivate(task);
		}
		spin_unlock_irqrestore(&hardwall_lock, flags);
	}

	return 0;
}
762 | |||
/* Last reference to this file is gone; drain and destroy its hardwall. */
static int hardwall_release(struct inode *inode, struct file *file)
{
	hardwall_destroy(file->private_data);
	return 0;
}
769 | |||
/* File operations for the "hardwall" character device. */
static const struct file_operations dev_hardwall_fops = {
	.unlocked_ioctl = hardwall_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = hardwall_compat_ioctl,
#endif
	.flush          = hardwall_flush,
	.release        = hardwall_release,
};
778 | |||
779 | static struct cdev hardwall_dev; | ||
780 | |||
781 | static int __init dev_hardwall_init(void) | ||
782 | { | ||
783 | int rc; | ||
784 | dev_t dev; | ||
785 | |||
786 | rc = alloc_chrdev_region(&dev, 0, 1, "hardwall"); | ||
787 | if (rc < 0) | ||
788 | return rc; | ||
789 | cdev_init(&hardwall_dev, &dev_hardwall_fops); | ||
790 | rc = cdev_add(&hardwall_dev, dev, 1); | ||
791 | if (rc < 0) | ||
792 | return rc; | ||
793 | |||
794 | return 0; | ||
795 | } | ||
796 | late_initcall(dev_hardwall_init); | ||
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S new file mode 100644 index 000000000000..2b4f6c091701 --- /dev/null +++ b/arch/tile/kernel/head_32.S | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * TILE startup code. | ||
15 | */ | ||
16 | |||
17 | #include <linux/linkage.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/pgtable.h> | ||
21 | #include <asm/thread_info.h> | ||
22 | #include <asm/processor.h> | ||
23 | #include <asm/asm-offsets.h> | ||
24 | #include <hv/hypervisor.h> | ||
25 | #include <arch/chip.h> | ||
26 | |||
27 | /* | ||
28 | * This module contains the entry code for kernel images. It performs the | ||
29 | * minimal setup needed to call the generic C routines. | ||
30 | */ | ||
31 | |||
/*
	 * Kernel entry point.  Every tile (master and secondaries) starts
	 * here; the hv_* entry points called below are the hypervisor
	 * vectors declared in hvglue.lds / <hv/hypervisor.h>.
	 */
	__HEAD
ENTRY(_start)
	/* Notify the hypervisor of what version of the API we want */
	{
	  movei r1, TILE_CHIP
	  movei r2, TILE_CHIP_REV
	}
	{
	  moveli r0, _HV_VERSION
	  jal hv_init
	}
	/* Get a reasonable default ASID in r0 */
	{
	  move r0, zero
	  jal hv_inquire_asid
	}
	/*
	 * Install the default page table.  We are presumably still running
	 * with VA == PA here, so symbol references subtract PAGE_OFFSET to
	 * form physical addresses -- TODO confirm against boot setup.
	 */
	{
	  moveli r6, lo16(swapper_pgprot - PAGE_OFFSET)
	  move r4, r0	/* use starting ASID of range for this page table */
	}
	{
	  moveli r0, lo16(swapper_pg_dir - PAGE_OFFSET)
	  auli r6, r6, ha16(swapper_pgprot - PAGE_OFFSET)
	}
	/* Load the two 32-bit words of the swapper_pgprot PTE into r2/r3. */
	{
	  lw r2, r6
	  addi r6, r6, 4
	}
	{
	  lw r3, r6
	  auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET)
	}
	{
	  inv r6	/* drop our cached copy of the pgprot line */
	  move r1, zero	/* high 32 bits of CPA is zero */
	}
	/*
	 * Set lr by hand to the label below so that hv_install_context
	 * "returns" there, then tail-jump into the hypervisor.
	 */
	{
	  moveli lr, lo16(1f)
	  move r5, zero
	}
	{
	  auli lr, lr, ha16(1f)
	  j hv_install_context
	}
1:

	/* Get our processor number and save it away in SAVE_1_0. */
	jal hv_inquire_topology
	mulll_uu r4, r1, r2	/* r1 == y, r2 == width */
	add r4, r4, r0		/* r0 == x, so r4 == cpu == y*width + x */

#ifdef CONFIG_SMP
	/*
	 * Load up our per-cpu offset.  When the first (master) tile
	 * boots, this value is still zero, so we will load boot_pc
	 * with start_kernel, and boot_sp with init_stack + THREAD_SIZE.
	 * The master tile initializes the per-cpu offset array, so that
	 * when subsequent (secondary) tiles boot, they will instead load
	 * from their per-cpu versions of boot_sp and boot_pc.
	 */
	moveli r5, lo16(__per_cpu_offset)
	auli r5, r5, ha16(__per_cpu_offset)
	s2a r5, r4, r5		/* index __per_cpu_offset by cpu number */
	lw r5, r5
	bnz r5, 1f		/* nonzero offset: secondary tile */

	/*
	 * Save the width and height to the smp_topology variable
	 * for later use.
	 */
	moveli r0, lo16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	auli r0, r0, ha16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET)
	{
	  sw r0, r2
	  addi r0, r0, (HV_TOPOLOGY_HEIGHT_OFFSET - HV_TOPOLOGY_WIDTH_OFFSET)
	}
	sw r0, r3
1:
#else
	move r5, zero		/* UP: per-cpu offset is always zero */
#endif

	/* Load and go with the correct pc and sp. */
	{
	  addli r1, r5, lo16(boot_sp)
	  addli r0, r5, lo16(boot_pc)
	}
	{
	  auli r1, r1, ha16(boot_sp)
	  auli r0, r0, ha16(boot_pc)
	}
	lw r0, r0
	lw sp, r1
	/*
	 * Pack the stack top and cpu number into one SPR; the stack is
	 * presumably THREAD_SIZE-aligned so the low bits are free for
	 * the cpu number (intvec_32.S separates them with "mm").
	 */
	or r4, sp, r4
	mtspr SYSTEM_SAVE_1_0, r4	/* save ksp0 + cpu */
	addi sp, sp, -STACK_TOP_DELTA
	{
	  move lr, zero	/* stop backtraces in the called function */
	  jr r0
	}
	ENDPROC(_start)
134 | |||
/* One page-aligned page of zeroes (the kernel's empty_zero_page). */
	.section ".bss.page_aligned","w"
	.align PAGE_SIZE
ENTRY(empty_zero_page)
	.fill PAGE_SIZE,1,0
	END(empty_zero_page)
140 | |||
/*
 * Emit one L1 page-table entry (two 32-bit words) mapping \va to the
 * physical address \cpa with the extra permission bits \bits1, marked
 * page/present/dirty/accessed and cached with no-L3 homing.
 * Unless \no_org is set, first .org to the swapper_pg_dir slot that
 * HV_L1_INDEX(\va) selects, so entries may be emitted out of order.
 */
.macro PTE va, cpa, bits1, no_org=0
	.ifeq \no_org
	.org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE
	.endif
	.word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
	      (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
	.word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
.endm
149 | |||
/* Boot-time L1 page table; refined later once RAM size is known. */
	.section ".data.page_aligned","wa"
	.align PAGE_SIZE
ENTRY(swapper_pg_dir)
	/*
	 * All data pages from PAGE_OFFSET to MEM_USER_INTRPT are mapped as
	 * VA = PA + PAGE_OFFSET.  We remap things with more precise access
	 * permissions and more respect for size of RAM later.
	 */
	.set addr, 0
	.rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT
	PTE addr + PAGE_OFFSET, addr, HV_PTE_READABLE | HV_PTE_WRITABLE
	.set addr, addr + PGDIR_SIZE
	.endr

	/* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
	PTE MEM_SV_INTRPT, 0, HV_PTE_READABLE | HV_PTE_EXECUTABLE
	/* Pad the table out to a full L1 page table's size. */
	.org swapper_pg_dir + HV_L1_SIZE
	END(swapper_pg_dir)
168 | |||
/*
 * Isolate swapper_pgprot to its own cache line, since each cpu
 * starting up will read it using VA-is-PA and local homing.
 * This would otherwise likely conflict with other data on the cache
 * line, once we have set its permanent home in the page tables.
 *
 * This is the prototype PTE whose two words _start loads into r2/r3
 * and passes to hv_install_context.
 */
	__INITDATA
	.align CHIP_L2_LINE_SIZE()
ENTRY(swapper_pgprot)
	PTE	0, 0, HV_PTE_READABLE | HV_PTE_WRITABLE, 1
	.align CHIP_L2_LINE_SIZE()
END(swapper_pgprot)
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds new file mode 100644 index 000000000000..2b7cd0a659a9 --- /dev/null +++ b/arch/tile/kernel/hvglue.lds | |||
@@ -0,0 +1,58 @@ | |||
/*
 * Hypervisor call vector addresses; see <hv/hypervisor.h>.
 *
 * Each hypervisor entry point occupies a fixed 0x20-byte slot,
 * beginning at TEXT_OFFSET + 0x10020.  Defining the symbols here as
 * absolute addresses lets the kernel call into the hypervisor with
 * ordinary jal/j instructions, with no runtime dispatch table.
 * The slot ordering is ABI: do not renumber or reorder entries.
 */
hv_init = TEXT_OFFSET + 0x10020;
hv_install_context = TEXT_OFFSET + 0x10040;
hv_sysconf = TEXT_OFFSET + 0x10060;
hv_get_rtc = TEXT_OFFSET + 0x10080;
hv_set_rtc = TEXT_OFFSET + 0x100a0;
hv_flush_asid = TEXT_OFFSET + 0x100c0;
hv_flush_page = TEXT_OFFSET + 0x100e0;
hv_flush_pages = TEXT_OFFSET + 0x10100;
hv_restart = TEXT_OFFSET + 0x10120;
hv_halt = TEXT_OFFSET + 0x10140;
hv_power_off = TEXT_OFFSET + 0x10160;
hv_inquire_physical = TEXT_OFFSET + 0x10180;
hv_inquire_memory_controller = TEXT_OFFSET + 0x101a0;
hv_inquire_virtual = TEXT_OFFSET + 0x101c0;
hv_inquire_asid = TEXT_OFFSET + 0x101e0;
hv_nanosleep = TEXT_OFFSET + 0x10200;
hv_console_read_if_ready = TEXT_OFFSET + 0x10220;
hv_console_write = TEXT_OFFSET + 0x10240;
hv_downcall_dispatch = TEXT_OFFSET + 0x10260;
hv_inquire_topology = TEXT_OFFSET + 0x10280;
hv_fs_findfile = TEXT_OFFSET + 0x102a0;
hv_fs_fstat = TEXT_OFFSET + 0x102c0;
hv_fs_pread = TEXT_OFFSET + 0x102e0;
hv_physaddr_read64 = TEXT_OFFSET + 0x10300;
hv_physaddr_write64 = TEXT_OFFSET + 0x10320;
hv_get_command_line = TEXT_OFFSET + 0x10340;
hv_set_caching = TEXT_OFFSET + 0x10360;
hv_bzero_page = TEXT_OFFSET + 0x10380;
hv_register_message_state = TEXT_OFFSET + 0x103a0;
hv_send_message = TEXT_OFFSET + 0x103c0;
hv_receive_message = TEXT_OFFSET + 0x103e0;
hv_inquire_context = TEXT_OFFSET + 0x10400;
hv_start_all_tiles = TEXT_OFFSET + 0x10420;
hv_dev_open = TEXT_OFFSET + 0x10440;
hv_dev_close = TEXT_OFFSET + 0x10460;
hv_dev_pread = TEXT_OFFSET + 0x10480;
hv_dev_pwrite = TEXT_OFFSET + 0x104a0;
hv_dev_poll = TEXT_OFFSET + 0x104c0;
hv_dev_poll_cancel = TEXT_OFFSET + 0x104e0;
hv_dev_preada = TEXT_OFFSET + 0x10500;
hv_dev_pwritea = TEXT_OFFSET + 0x10520;
hv_flush_remote = TEXT_OFFSET + 0x10540;
hv_console_putc = TEXT_OFFSET + 0x10560;
hv_inquire_tiles = TEXT_OFFSET + 0x10580;
hv_confstr = TEXT_OFFSET + 0x105a0;
hv_reexec = TEXT_OFFSET + 0x105c0;
hv_set_command_line = TEXT_OFFSET + 0x105e0;
hv_clear_intr = TEXT_OFFSET + 0x10600;
hv_enable_intr = TEXT_OFFSET + 0x10620;
hv_disable_intr = TEXT_OFFSET + 0x10640;
hv_raise_intr = TEXT_OFFSET + 0x10660;
hv_trigger_ipi = TEXT_OFFSET + 0x10680;
hv_store_mapping = TEXT_OFFSET + 0x106a0;
hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
hv_flush_all = TEXT_OFFSET + 0x106e0;
hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
hv_glue_internals = TEXT_OFFSET + 0x10720;
diff --git a/arch/tile/kernel/init_task.c b/arch/tile/kernel/init_task.c new file mode 100644 index 000000000000..928b31870669 --- /dev/null +++ b/arch/tile/kernel/init_task.c | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/init_task.h> | ||
18 | #include <linux/mqueue.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/start_kernel.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | |||
/* Statically-allocated signal state for the init task. */
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);

/*
 * Initial thread structure.
 *
 * We need to make sure that this is THREAD_SIZE aligned due to the
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 */
union thread_union init_thread_union __init_task_data = {
	INIT_THREAD_INFO(init_task)
};

/*
 * Initial task structure.
 *
 * All other task structs will be allocated on slabs in fork.c
 */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
44 | |||
/*
 * per-CPU stack and boot info.
 *
 * head_32.S reads boot_pc/boot_sp (via the booting cpu's per-cpu
 * offset) to choose the pc and stack each tile starts with; the
 * master tile boots with these static initial values.
 */
DEFINE_PER_CPU(unsigned long, boot_sp) =
	(unsigned long)init_stack + THREAD_SIZE;

#ifdef CONFIG_SMP
DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel;
#else
/*
 * The variable must be __initdata since it references __init code.
 * With CONFIG_SMP it is per-cpu data, which is exempt from validation.
 */
unsigned long __initdata boot_pc = (unsigned long)start_kernel;
#endif
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S new file mode 100644 index 000000000000..3404c75f8e64 --- /dev/null +++ b/arch/tile/kernel/intvec_32.S | |||
@@ -0,0 +1,2008 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Linux interrupt vectors. | ||
15 | */ | ||
16 | |||
17 | #include <linux/linkage.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/unistd.h> | ||
21 | #include <asm/ptrace.h> | ||
22 | #include <asm/thread_info.h> | ||
23 | #include <asm/irqflags.h> | ||
24 | #include <asm/atomic.h> | ||
25 | #include <asm/asm-offsets.h> | ||
26 | #include <hv/hypervisor.h> | ||
27 | #include <arch/abi.h> | ||
28 | #include <arch/interrupts.h> | ||
29 | #include <arch/spr_def.h> | ||
30 | |||
#ifdef CONFIG_PREEMPT
# error "No support for kernel preemption currently"
#endif

/* The INTCTRL_1 mask-setting code below assumes the vector is in 32..47. */
#if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48
# error INT_INTCTRL_1 coded to set high interrupt mask
#endif

/* Form a pointer to a pt_regs field: pt_regs sits above the C ABI save area. */
#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

/* The syscall number lives in the pt_regs slot for TREG_SYSCALL_NR. */
#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)

#if !CHIP_HAS_WH64()
/* By making this an empty macro, we can use wh64 in the code. */
.macro wh64 reg
.endm
#endif
48 | |||
/*
 * Store \reg at *\ptr, then post-adjust \ptr by \delta (default -4:
 * the pt_regs save area is filled from higher addresses downward).
 */
	.macro  push_reg reg, ptr=sp, delta=-4
	{
	 sw     \ptr, \reg
	 addli  \ptr, \ptr, \delta
	}
	.endm
55 | |||
/* Load \reg from *\ptr, then post-adjust \ptr by \delta (default +4). */
	.macro  pop_reg reg, ptr=sp, delta=4
	{
	 lw     \reg, \ptr
	 addli  \ptr, \ptr, \delta
	}
	.endm
62 | |||
/*
 * Like pop_reg, but also zero \zreg in the same bundle -- a free way
 * to scrub a second register while restoring \reg.
 */
	.macro  pop_reg_zero reg, zreg, ptr=sp, delta=4
	{
	 move   \zreg, zero
	 lw     \reg, \ptr
	 addi   \ptr, \ptr, \delta
	}
	.endm
70 | |||
/*
 * Save the remaining callee-save registers r34..r51 into their pt_regs
 * slots, working downward from REG(51).  Clobbers \reg as the save
 * pointer; the final push's delta leaves \reg at PTREGS_OFFSET_BASE.
 */
	.macro  push_extra_callee_saves reg
	PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
	push_reg r51, \reg
	push_reg r50, \reg
	push_reg r49, \reg
	push_reg r48, \reg
	push_reg r47, \reg
	push_reg r46, \reg
	push_reg r45, \reg
	push_reg r44, \reg
	push_reg r43, \reg
	push_reg r42, \reg
	push_reg r41, \reg
	push_reg r40, \reg
	push_reg r39, \reg
	push_reg r38, \reg
	push_reg r37, \reg
	push_reg r36, \reg
	push_reg r35, \reg
	push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
	.endm
92 | |||
/*
 * Inline panic: emit \str into .rodata, load its address into r0, and
 * call the C panic() routine.  Does not return.
 */
	.macro  panic str
	.pushsection .rodata, "a"
1:
	.asciz  "\str"
	.popsection
	{
	 moveli r0, lo16(1b)
	}
	{
	 auli   r0, r0, ha16(1b)
	 jal    panic
	}
	.endm
106 | |||
#ifdef __COLLECT_LINKER_FEEDBACK__
	/* Anchor label for the per-vector feedback stubs emitted below. */
	.pushsection .text.intvec_feedback,"ax"
intvec_feedback:
	.popsection
#endif

/*
 * Default interrupt handler.
 *
 * vecnum is where we'll put this code.
 * c_routine is the C routine we'll call.
 *
 * The C routine is passed two arguments:
 * - A pointer to the pt_regs state.
 * - The interrupt vector number.
 *
 * The "processing" argument specifies the code for processing
 * the interrupt. Defaults to "handle_interrupt".
 */
	.macro  int_hand vecnum, vecname, c_routine, processing=handle_interrupt
	.org    (\vecnum << 8)		/* each vector has a 256-byte slot */
intvec_\vecname:
	/* A negative "syscall" number selects the cmpxchg fast path. */
	.ifc    \vecnum, INT_SWINT_1
	blz     TREG_SYSCALL_NR_NAME, sys_cmpxchg
	.endif

	/* Temporarily save a register so we have somewhere to work. */

	mtspr   SYSTEM_SAVE_1_1, r0
	mfspr   r0, EX_CONTEXT_1_1

	/* The cmpxchg code clears sp to force us to reset it here on fault. */
	{
	 bz     sp, 2f
	 andi   r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	}

	.ifc    \vecnum, INT_DOUBLE_FAULT
	/*
	 * For double-faults from user-space, fall through to the normal
	 * register save and stack setup path.  Otherwise, it's the
	 * hypervisor giving us one last chance to dump diagnostics, and we
	 * branch to the kernel_double_fault routine to do so.
	 */
	bz      r0, 1f
	j       _kernel_double_fault
1:
	.else
	/*
	 * If we're coming from user-space, then set sp to the top of
	 * the kernel stack.  Otherwise, assume sp is already valid.
	 */
	{
	 bnz    r0, 0f
	 move   r0, sp
	}
	.endif

	.ifc    \c_routine, do_page_fault
	/*
	 * The page_fault handler may be downcalled directly by the
	 * hypervisor even when Linux is running and has ICS set.
	 *
	 * In this case the contents of EX_CONTEXT_1_1 reflect the
	 * previous fault and can't be relied on to choose whether or
	 * not to reinitialize the stack pointer.  So we add a test
	 * to see whether SYSTEM_SAVE_1_2 has the high bit set,
	 * and if so we don't reinitialize sp, since we must be coming
	 * from Linux.  (In fact the precise case is !(val & ~1),
	 * but any Linux PC has to have the high bit set.)
	 *
	 * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for
	 * any path that turns into a downcall to one of our TLB handlers.
	 */
	mfspr   r0, SYSTEM_SAVE_1_2
	{
	 blz    r0, 0f    /* high bit in S_S_1_2 is for a PC to use */
	 move   r0, sp
	}
	.endif

2:
	/*
	 * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and
	 * the current stack top in the higher bits.  So we recover
	 * our stack top by just masking off the low bits, then
	 * point sp at the top aligned address on the actual stack page.
	 */
	mfspr   r0, SYSTEM_SAVE_1_0
	mm      r0, r0, zero, LOG2_THREAD_SIZE, 31

0:
	/*
	 * Align the stack mod 64 so we can properly predict what
	 * cache lines we need to write-hint to reduce memory fetch
	 * latency as we enter the kernel.  The layout of memory is
	 * as follows, with cache line 0 at the lowest VA, and cache
	 * line 4 just below the r0 value this "andi" computes.
	 * Note that we never write to cache line 4, and we skip
	 * cache line 1 for syscalls.
	 *
	 *    cache line 4: ptregs padding (two words)
	 *    cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad
	 *    cache line 2: r30...r45
	 *    cache line 1: r14...r29
	 *    cache line 0: 2 x frame, r0..r13
	 */
	andi    r0, r0, -64

	/*
	 * Push the first four registers on the stack, so that we can set
	 * them to vector-unique values before we jump to the common code.
	 *
	 * Registers are pushed on the stack as a struct pt_regs,
	 * with the sp initially just above the struct, and when we're
	 * done, sp points to the base of the struct, minus
	 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
	 *
	 * This routine saves just the first four registers, plus the
	 * stack context so we can do proper backtracing right away,
	 * and defers to handle_interrupt to save the rest.
	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
	 */
	addli   r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
	wh64    r0   /* cache line 3 */
	{
	 sw     r0, lr
	 addli  r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
	}
	{
	 sw     r0, sp
	 addli  sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
	}
	{
	 sw     sp, r52
	 addli  sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
	}
	wh64    sp   /* cache line 0 */
	{
	 sw     sp, r1
	 addli  sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
	}
	{
	 sw     sp, r2
	 addli  sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
	}
	{
	 sw     sp, r3
	 addli  sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
	}
	mfspr   r0, EX_CONTEXT_1_0
	.ifc \processing,handle_syscall
	/*
	 * Bump the saved PC by one bundle so that when we return, we won't
	 * execute the same swint instruction again.  We need to do this while
	 * we're in the critical section.
	 */
	addi    r0, r0, 8
	.endif
	{
	 sw     sp, r0
	 addli  sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	}
	mfspr   r0, EX_CONTEXT_1_1
	{
	 sw     sp, r0
	 addi   sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
	/*
	 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
	 * so that it gets passed through unchanged to the handler routine.
	 * Note that the .if conditional confusingly spans bundles.
	 */
	 .ifc \processing,handle_syscall
	 movei  r0, \vecnum
	}
	{
	 sw     sp, r0
	 .else
	 movei  r1, \vecnum
	}
	{
	 sw     sp, r1
	 .endif
	 addli  sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
	}
	mfspr   r0, SYSTEM_SAVE_1_1    /* Original r0 */
	{
	 sw     sp, r0
	 addi   sp, sp, -PTREGS_OFFSET_REG(0) - 4
	}
	{
	 sw     sp, zero        /* write zero into "Next SP" frame pointer */
	 addi   sp, sp, -4      /* leave SP pointing at bottom of frame */
	}
	.ifc \processing,handle_syscall
	j       handle_syscall
	.else
	/*
	 * Capture per-interrupt SPR context to registers.
	 * We overload the meaning of r3 on this path such that if its bit 31
	 * is set, we have to mask all interrupts including NMIs before
	 * clearing the interrupt critical section bit.
	 * See discussion below at "finish_interrupt_save".
	 *
	 * The nested .ifc chain below dispatches on the C routine to pick
	 * which SPRs to capture into r2/r3 for that vector.
	 */
	.ifc \c_routine, do_page_fault
	mfspr   r2, SYSTEM_SAVE_1_3   /* address of page fault */
	mfspr   r3, SYSTEM_SAVE_1_2   /* info about page fault */
	.else
	.ifc \vecnum, INT_DOUBLE_FAULT
	{
	 mfspr  r2, SYSTEM_SAVE_1_2   /* double fault info from HV */
	 movei  r3, 0
	}
	.else
	.ifc \c_routine, do_trap
	{
	 mfspr  r2, GPV_REASON
	 movei  r3, 0
	}
	.else
	.ifc \c_routine, op_handle_perf_interrupt
	{
	 mfspr  r2, PERF_COUNT_STS
	 movei  r3, -1   /* not used, but set for consistency */
	}
	.else
#if CHIP_HAS_AUX_PERF_COUNTERS()
	.ifc \c_routine, op_handle_aux_perf_interrupt
	{
	 mfspr  r2, AUX_PERF_COUNT_STS
	 movei  r3, -1   /* not used, but set for consistency */
	}
	.else
#endif
	movei   r3, 0
#if CHIP_HAS_AUX_PERF_COUNTERS()
	.endif
#endif
	.endif
	.endif
	.endif
	.endif
	/* Put function pointer in r0 */
	moveli  r0, lo16(\c_routine)
	{
	 auli   r0, r0, ha16(\c_routine)
	 j      \processing
	}
	.endif
	ENDPROC(intvec_\vecname)

#ifdef __COLLECT_LINKER_FEEDBACK__
	/* Per-vector feedback stub, placed at a fixed 32-byte slot. */
	.pushsection .text.intvec_feedback,"ax"
	.org    (\vecnum << 5)
	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)
	jrp     lr
	.popsection
#endif

	.endm
367 | |||
368 | |||
369 | /* | ||
370 | * Save the rest of the registers that we didn't save in the actual | ||
371 | * vector itself. We can't use r0-r10 inclusive here. | ||
372 | */ | ||
373 | .macro finish_interrupt_save, function | ||
374 | |||
375 | /* If it's a syscall, save a proper orig_r0, otherwise just zero. */ | ||
376 | PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0) | ||
377 | { | ||
378 | .ifc \function,handle_syscall | ||
379 | sw r52, r0 | ||
380 | .else | ||
381 | sw r52, zero | ||
382 | .endif | ||
383 | PTREGS_PTR(r52, PTREGS_OFFSET_TP) | ||
384 | } | ||
385 | |||
386 | /* | ||
387 | * For ordinary syscalls, we save neither caller- nor callee- | ||
388 | * save registers, since the syscall invoker doesn't expect the | ||
389 | * caller-saves to be saved, and the called kernel functions will | ||
390 | * take care of saving the callee-saves for us. | ||
391 | * | ||
392 | * For interrupts we save just the caller-save registers. Saving | ||
393 | * them is required (since the "caller" can't save them). Again, | ||
394 | * the called kernel functions will restore the callee-save | ||
395 | * registers for us appropriately. | ||
396 | * | ||
397 | * On return, we normally restore nothing special for syscalls, | ||
398 | * and just the caller-save registers for interrupts. | ||
399 | * | ||
400 | * However, there are some important caveats to all this: | ||
401 | * | ||
402 | * - We always save a few callee-save registers to give us | ||
403 | * some scratchpad registers to carry across function calls. | ||
404 | * | ||
405 | * - fork/vfork/etc require us to save all the callee-save | ||
406 | * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below. | ||
407 | * | ||
408 | * - We always save r0..r5 and r10 for syscalls, since we need | ||
409 | * to reload them a bit later for the actual kernel call, and | ||
410 | * since we might need them for -ERESTARTNOINTR, etc. | ||
411 | * | ||
412 | * - Before invoking a signal handler, we save the unsaved | ||
413 | * callee-save registers so they are visible to the | ||
414 | * signal handler or any ptracer. | ||
415 | * | ||
416 | * - If the unsaved callee-save registers are modified, we set | ||
417 | * a bit in pt_regs so we know to reload them from pt_regs | ||
418 | * and not just rely on the kernel function unwinding. | ||
419 | * (Done for ptrace register writes and SA_SIGINFO handler.) | ||
420 | */ | ||
421 | { | ||
422 | sw r52, tp | ||
423 | PTREGS_PTR(r52, PTREGS_OFFSET_REG(33)) | ||
424 | } | ||
425 | wh64 r52 /* cache line 2 */ | ||
426 | push_reg r33, r52 | ||
427 | push_reg r32, r52 | ||
428 | push_reg r31, r52 | ||
429 | .ifc \function,handle_syscall | ||
430 | push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30) | ||
431 | push_reg TREG_SYSCALL_NR_NAME, r52, \ | ||
432 | PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL | ||
433 | .else | ||
434 | |||
435 | push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30) | ||
436 | wh64 r52 /* cache line 1 */ | ||
437 | push_reg r29, r52 | ||
438 | push_reg r28, r52 | ||
439 | push_reg r27, r52 | ||
440 | push_reg r26, r52 | ||
441 | push_reg r25, r52 | ||
442 | push_reg r24, r52 | ||
443 | push_reg r23, r52 | ||
444 | push_reg r22, r52 | ||
445 | push_reg r21, r52 | ||
446 | push_reg r20, r52 | ||
447 | push_reg r19, r52 | ||
448 | push_reg r18, r52 | ||
449 | push_reg r17, r52 | ||
450 | push_reg r16, r52 | ||
451 | push_reg r15, r52 | ||
452 | push_reg r14, r52 | ||
453 | push_reg r13, r52 | ||
454 | push_reg r12, r52 | ||
455 | push_reg r11, r52 | ||
456 | push_reg r10, r52 | ||
457 | push_reg r9, r52 | ||
458 | push_reg r8, r52 | ||
459 | push_reg r7, r52 | ||
460 | push_reg r6, r52 | ||
461 | |||
462 | .endif | ||
463 | |||
464 | push_reg r5, r52 | ||
465 | sw r52, r4 | ||
466 | |||
467 | /* Load tp with our per-cpu offset. */ | ||
468 | #ifdef CONFIG_SMP | ||
469 | { | ||
470 | mfspr r20, SYSTEM_SAVE_1_0 | ||
471 | moveli r21, lo16(__per_cpu_offset) | ||
472 | } | ||
473 | { | ||
474 | auli r21, r21, ha16(__per_cpu_offset) | ||
475 | mm r20, r20, zero, 0, LOG2_THREAD_SIZE-1 | ||
476 | } | ||
477 | s2a r20, r20, r21 | ||
478 | lw tp, r20 | ||
479 | #else | ||
480 | move tp, zero | ||
481 | #endif | ||
482 | |||
483 | /* | ||
484 | * If we will be returning to the kernel, we will need to | ||
485 | * reset the interrupt masks to the state they had before. | ||
486 | * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled. | ||
487 | * We load flags in r32 here so we can jump to .Lrestore_regs | ||
488 | * directly after do_page_fault_ics() if necessary. | ||
489 | */ | ||
490 | mfspr r32, EX_CONTEXT_1_1 | ||
491 | { | ||
492 | andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
493 | PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS) | ||
494 | } | ||
495 | bzt r32, 1f /* zero if from user space */ | ||
496 | IRQS_DISABLED(r32) /* zero if irqs enabled */ | ||
497 | #if PT_FLAGS_DISABLE_IRQ != 1 | ||
498 | # error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix | ||
499 | #endif | ||
500 | 1: | ||
501 | .ifnc \function,handle_syscall | ||
502 | /* Record the fact that we saved the caller-save registers above. */ | ||
503 | ori r32, r32, PT_FLAGS_CALLER_SAVES | ||
504 | .endif | ||
505 | sw r21, r32 | ||
506 | |||
507 | #ifdef __COLLECT_LINKER_FEEDBACK__ | ||
508 | /* | ||
509 | * Notify the feedback routines that we were in the | ||
510 | * appropriate fixed interrupt vector area. Note that we | ||
511 | * still have ICS set at this point, so we can't invoke any | ||
512 | * atomic operations or we will panic. The feedback | ||
513 | * routines internally preserve r0..r10 and r30 up. | ||
514 | */ | ||
515 | .ifnc \function,handle_syscall | ||
516 | shli r20, r1, 5 | ||
517 | .else | ||
518 | moveli r20, INT_SWINT_1 << 5 | ||
519 | .endif | ||
520 | addli r20, r20, lo16(intvec_feedback) | ||
521 | auli r20, r20, ha16(intvec_feedback) | ||
522 | jalr r20 | ||
523 | |||
524 | /* And now notify the feedback routines that we are here. */ | ||
525 | FEEDBACK_ENTER(\function) | ||
526 | #endif | ||
527 | |||
528 | /* | ||
529 | * we've captured enough state to the stack (including in | ||
530 | * particular our EX_CONTEXT state) that we can now release | ||
531 | * the interrupt critical section and replace it with our | ||
532 | * standard "interrupts disabled" mask value. This allows | ||
533 | * synchronous interrupts (and profile interrupts) to punch | ||
534 | * through from this point onwards. | ||
535 | * | ||
536 | * If bit 31 of r3 is set during a non-NMI interrupt, we know we | ||
537 | * are on the path where the hypervisor has punched through our | ||
538 | * ICS with a page fault, so we call out to do_page_fault_ics() | ||
539 | * to figure out what to do with it. If the fault was in | ||
540 | * an atomic op, we unlock the atomic lock, adjust the | ||
541 | * saved register state a little, and return "zero" in r4, | ||
542 | * falling through into the normal page-fault interrupt code. | ||
543 | * If the fault was in a kernel-space atomic operation, then | ||
544 | * do_page_fault_ics() resolves it itself, returns "one" in r4, | ||
545 | * and as a result goes directly to restoring registers and iret, | ||
546 | * without trying to adjust the interrupt masks at all. | ||
547 | * The do_page_fault_ics() API involves passing and returning | ||
548 | * a five-word struct (in registers) to avoid writing the | ||
549 | * save and restore code here. | ||
550 | */ | ||
551 | .ifc \function,handle_nmi | ||
552 | IRQ_DISABLE_ALL(r20) | ||
553 | .else | ||
554 | .ifnc \function,handle_syscall | ||
555 | bgezt r3, 1f | ||
556 | { | ||
557 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
558 | jal do_page_fault_ics | ||
559 | } | ||
560 | FEEDBACK_REENTER(\function) | ||
561 | bzt r4, 1f | ||
562 | j .Lrestore_regs | ||
563 | 1: | ||
564 | .endif | ||
565 | IRQ_DISABLE(r20, r21) | ||
566 | .endif | ||
567 | mtspr INTERRUPT_CRITICAL_SECTION, zero | ||
568 | |||
569 | #if CHIP_HAS_WH64() | ||
570 | /* | ||
571 | * Prepare the first 256 stack bytes to be rapidly accessible | ||
572 | * without having to fetch the background data. We don't really | ||
573 | * know how far to write-hint, but kernel stacks generally | ||
574 | * aren't that big, and write-hinting here does take some time. | ||
575 | */ | ||
576 | addi r52, sp, -64 | ||
577 | { | ||
578 | wh64 r52 | ||
579 | addi r52, r52, -64 | ||
580 | } | ||
581 | { | ||
582 | wh64 r52 | ||
583 | addi r52, r52, -64 | ||
584 | } | ||
585 | { | ||
586 | wh64 r52 | ||
587 | addi r52, r52, -64 | ||
588 | } | ||
589 | wh64 r52 | ||
590 | #endif | ||
591 | |||
592 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
593 | .ifnc \function,handle_nmi | ||
594 | /* | ||
595 | * We finally have enough state set up to notify the irq | ||
596 | * tracing code that irqs were disabled on entry to the handler. | ||
597 | * The TRACE_IRQS_OFF call clobbers registers r0-r29. | ||
598 | * For syscalls, we already have the register state saved away | ||
599 | * on the stack, so we don't bother to do any register saves here, | ||
600 | * and later we pop the registers back off the kernel stack. | ||
601 | * For interrupt handlers, save r0-r3 in callee-saved registers. | ||
602 | */ | ||
603 | .ifnc \function,handle_syscall | ||
604 | { move r30, r0; move r31, r1 } | ||
605 | { move r32, r2; move r33, r3 } | ||
606 | .endif | ||
607 | TRACE_IRQS_OFF | ||
608 | .ifnc \function,handle_syscall | ||
609 | { move r0, r30; move r1, r31 } | ||
610 | { move r2, r32; move r3, r33 } | ||
611 | .endif | ||
612 | .endif | ||
613 | #endif | ||
614 | |||
615 | .endm | ||
616 | |||
617 | .macro check_single_stepping, kind, not_single_stepping | ||
618 | /* | ||
619 | * Check for single stepping in user-level priv | ||
620 | * kind can be "normal", "ill", or "syscall" | ||
621 | * At end, if fall-thru | ||
622 | * r29: thread_info->step_state | ||
623 | * r28: &pt_regs->pc | ||
624 | * r27: pt_regs->pc | ||
625 | * r26: thread_info->step_state->buffer | ||
626 | */ | ||
627 | |||
628 | /* Check for single stepping */ | ||
629 | GET_THREAD_INFO(r29) | ||
630 | { | ||
631 | /* Get pointer to field holding step state */ | ||
632 | addi r29, r29, THREAD_INFO_STEP_STATE_OFFSET | ||
633 | |||
634 | /* Get pointer to EX1 in register state */ | ||
635 | PTREGS_PTR(r27, PTREGS_OFFSET_EX1) | ||
636 | } | ||
637 | { | ||
638 | /* Get pointer to field holding PC */ | ||
639 | PTREGS_PTR(r28, PTREGS_OFFSET_PC) | ||
640 | |||
641 | /* Load the pointer to the step state */ | ||
642 | lw r29, r29 | ||
643 | } | ||
644 | /* Load EX1 */ | ||
645 | lw r27, r27 | ||
646 | { | ||
647 | /* Points to flags */ | ||
648 | addi r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET | ||
649 | |||
650 | /* No single stepping if there is no step state structure */ | ||
651 | bzt r29, \not_single_stepping | ||
652 | } | ||
653 | { | ||
654 | /* mask off ICS and any other high bits */ | ||
655 | andi r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK | ||
656 | |||
657 | /* Load pointer to single step instruction buffer */ | ||
658 | lw r26, r29 | ||
659 | } | ||
660 | /* Check priv state */ | ||
661 | bnz r27, \not_single_stepping | ||
662 | |||
663 | /* Get flags */ | ||
664 | lw r22, r23 | ||
665 | { | ||
666 | /* Branch if single-step mode not enabled */ | ||
667 | bbnst r22, \not_single_stepping | ||
668 | |||
669 | /* Clear enabled flag */ | ||
670 | andi r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED | ||
671 | } | ||
672 | .ifc \kind,normal | ||
673 | { | ||
674 | /* Load PC */ | ||
675 | lw r27, r28 | ||
676 | |||
677 | /* Point to the entry containing the original PC */ | ||
678 | addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET | ||
679 | } | ||
680 | { | ||
681 | /* Disable single stepping flag */ | ||
682 | sw r23, r22 | ||
683 | } | ||
684 | { | ||
685 | /* Get the original pc */ | ||
686 | lw r24, r24 | ||
687 | |||
688 | /* See if the PC is at the start of the single step buffer */ | ||
689 | seq r25, r26, r27 | ||
690 | } | ||
691 | /* | ||
692 | * NOTE: it is really expected that the PC be in the single step buffer | ||
693 | * at this point | ||
694 | */ | ||
695 | bzt r25, \not_single_stepping | ||
696 | |||
697 | /* Restore the original PC */ | ||
698 | sw r28, r24 | ||
699 | .else | ||
700 | .ifc \kind,syscall | ||
701 | { | ||
702 | /* Load PC */ | ||
703 | lw r27, r28 | ||
704 | |||
705 | /* Point to the entry containing the next PC */ | ||
706 | addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET | ||
707 | } | ||
708 | { | ||
709 | /* Increment the stopped PC by the bundle size */ | ||
710 | addi r26, r26, 8 | ||
711 | |||
712 | /* Disable single stepping flag */ | ||
713 | sw r23, r22 | ||
714 | } | ||
715 | { | ||
716 | /* Get the next pc */ | ||
717 | lw r24, r24 | ||
718 | |||
719 | /* | ||
720 | * See if the PC is one bundle past the start of the | ||
721 | * single step buffer | ||
722 | */ | ||
723 | seq r25, r26, r27 | ||
724 | } | ||
725 | { | ||
726 | /* | ||
727 | * NOTE: it is really expected that the PC be in the | ||
728 | * single step buffer at this point | ||
729 | */ | ||
730 | bzt r25, \not_single_stepping | ||
731 | } | ||
732 | /* Set to the next PC */ | ||
733 | sw r28, r24 | ||
734 | .else | ||
735 | { | ||
736 | /* Point to 3rd bundle in buffer */ | ||
737 | addi r25, r26, 16 | ||
738 | |||
739 | /* Load PC */ | ||
740 | lw r27, r28 | ||
741 | } | ||
742 | { | ||
743 | /* Disable single stepping flag */ | ||
744 | sw r23, r22 | ||
745 | |||
746 | /* See if the PC is in the single step buffer */ | ||
747 | slte_u r24, r26, r27 | ||
748 | } | ||
749 | { | ||
750 | slte_u r25, r27, r25 | ||
751 | |||
752 | /* | ||
753 | * NOTE: it is really expected that the PC be in the | ||
754 | * single step buffer at this point | ||
755 | */ | ||
756 | bzt r24, \not_single_stepping | ||
757 | } | ||
758 | bzt r25, \not_single_stepping | ||
759 | .endif | ||
760 | .endif | ||
761 | .endm | ||
762 | |||
	/*
	 * Redispatch a downcall: emit a one-instruction stub at the fixed
	 * vector slot for \vecnum (each vector occupies a 256-byte slot,
	 * hence the .org of vecnum << 8) that simply forwards to the
	 * hypervisor downcall dispatcher.
	 */
	.macro dc_dispatch vecnum, vecname
	.org   (\vecnum << 8)
intvec_\vecname:
	j      hv_downcall_dispatch
	ENDPROC(intvec_\vecname)
	.endm
772 | |||
	/*
	 * Common code for most interrupts.  The C function we're eventually
	 * going to is in r0, and the faultnum is in r1; the original
	 * values for those registers are on the stack.
	 */
	.pushsection .text.handle_interrupt,"ax"
handle_interrupt:
	finish_interrupt_save handle_interrupt

	/*
	 * Check for if we are single stepping in user level.  If so, then
	 * we need to restore the PC.
	 */
	check_single_stepping normal, .Ldispatch_interrupt
.Ldispatch_interrupt:

	/*
	 * Jump to the C routine; it should enable irqs as soon as possible.
	 * Within the bundle, jalr reads the old r0 (the C handler address)
	 * while r0 is simultaneously rewritten with the pt_regs pointer
	 * that the handler takes as its first argument.
	 */
	{
	 jalr   r0
	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	}
	FEEDBACK_REENTER(handle_interrupt)
	{
	 movei  r30, 0   /* not an NMI */
	 j      interrupt_return
	}
	STD_ENDPROC(handle_interrupt)
801 | |||
	/*
	 * Return path for all interrupts/syscalls/faults.
	 *
	 * This routine takes a boolean in r30 indicating if this is an NMI.
	 * If so, we also expect a boolean in r31 indicating whether to
	 * re-enable the oprofile interrupts.
	 */
STD_ENTRY(interrupt_return)
	/* If we're resuming to kernel space, don't check thread flags. */
	{
	 bnz    r30, .Lrestore_all  /* NMIs don't special-case user-space */
	 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
	}
	lw      r29, r29
	andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
	{
	 bzt    r29, .Lresume_userspace
	 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
	}

	/*
	 * If we're resuming to _cpu_idle_nap, bump PC forward by 8 (one
	 * bundle), so the interrupted nap instruction is not re-executed.
	 */
	{
	 lw     r28, r29
	 moveli r27, lo16(_cpu_idle_nap)
	}
	{
	 auli   r27, r27, ha16(_cpu_idle_nap)
	}
	{
	 seq    r27, r27, r28
	}
	{
	 bbns   r27, .Lrestore_all
	 addi   r28, r28, 8
	}
	sw      r29, r28
	j       .Lrestore_all

.Lresume_userspace:
	FEEDBACK_REENTER(interrupt_return)

	/*
	 * Disable interrupts so as to make sure we don't
	 * miss an interrupt that sets any of the thread flags (like
	 * need_resched or sigpending) between sampling and the iret.
	 * Routines like schedule() or do_signal() may re-enable
	 * interrupts before returning.
	 */
	IRQ_DISABLE(r20, r21)
	TRACE_IRQS_OFF  /* Note: clobbers registers r0-r29 */

	/* Get base of stack in r32; note r30/31 are used as arguments here. */
	GET_THREAD_INFO(r32)


	/* Check to see if there is any work to do before returning to user. */
	{
	 addi   r29, r32, THREAD_INFO_FLAGS_OFFSET
	 moveli r28, lo16(_TIF_ALLWORK_MASK)
	}
	{
	 lw     r29, r29
	 auli   r28, r28, ha16(_TIF_ALLWORK_MASK)
	}
	and     r28, r29, r28
	bnz     r28, .Lwork_pending

	/*
	 * In the NMI case we
	 * omit the call to single_process_check_nohz, which normally checks
	 * to see if we should start or stop the scheduler tick, because
	 * we can't call arbitrary Linux code from an NMI context.
	 * We always call the homecache TLB deferral code to re-trigger
	 * the deferral mechanism.
	 *
	 * The other chunk of responsibility this code has is to reset the
	 * interrupt masks appropriately to reset irqs and NMIs.  We have
	 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
	 * lockdep-type stuff, but we can't set ICS until afterwards, since
	 * ICS can only be used in very tight chunks of code to avoid
	 * tripping over various assertions that it is off.
	 *
	 * (There is what looks like a window of vulnerability here since
	 * we might take a profile interrupt between the two SPR writes
	 * that set the mask, but since we write the low SPR word first,
	 * and our interrupt entry code checks the low SPR word, any
	 * profile interrupt will actually disable interrupts in both SPRs
	 * before returning, which is OK.)
	 */
.Lrestore_all:
	PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
	{
	 lw     r0, r0
	 PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
	}
	{
	 andi   r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
	 lw     r32, r32
	}
	/* r0 != 0: returning to kernel, so honor PT_FLAGS_DISABLE_IRQ */
	bnz     r0, 1f
	j       2f
#if PT_FLAGS_DISABLE_IRQ != 1
# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below
#endif
1:	bbnst   r32, 2f
	IRQ_DISABLE(r20,r21)
	TRACE_IRQS_OFF
	movei   r0, 1
	mtspr   INTERRUPT_CRITICAL_SECTION, r0
	bzt     r30, .Lrestore_regs
	j       3f
2:	TRACE_IRQS_ON
	movei   r0, 1
	mtspr   INTERRUPT_CRITICAL_SECTION, r0
	IRQ_ENABLE(r20, r21)
	bzt     r30, .Lrestore_regs
3:


	/*
	 * We now commit to returning from this interrupt, since we will be
	 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
	 * frame.  No calls should be made to any other code after this point.
	 * This code should only be entered with ICS set.
	 * r32 must still be set to ptregs.flags.
	 * We launch loads to each cache line separately first, so we can
	 * get some parallelism out of the memory subsystem.
	 * We start zeroing caller-saved registers throughout, since
	 * that will save some cycles if this turns out to be a syscall.
	 */
.Lrestore_regs:
	FEEDBACK_REENTER(interrupt_return)  /* called from elsewhere */

	/*
	 * Rotate so we have one high bit and one low bit to test.
	 * - low bit says whether to restore all the callee-saved registers,
	 *   or just r30-r33, and r52 up.
	 * - high bit (i.e. sign bit) says whether to restore all the
	 *   caller-saved registers, or just r0.
	 */
#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
# error Rotate trick does not work :-)
#endif
	{
	 rli    r20, r32, 30
	 PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
	}

	/*
	 * Load cache lines 0, 2, and 3 in that order, then use
	 * the last loaded value, which makes it likely that the other
	 * cache lines have also loaded, at which point we should be
	 * able to safely read all the remaining words on those cache
	 * lines without waiting for the memory subsystem.
	 */
	pop_reg_zero r0, r1, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
	pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
	pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
	{
	 mtspr  EX_CONTEXT_1_0, r21
	 move   r5, zero
	}
	{
	 mtspr  EX_CONTEXT_1_1, lr
	 andi   lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
	}

	/* Restore callee-saveds that we actually use. */
	pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52)
	pop_reg_zero r31, r7
	pop_reg_zero r32, r8
	pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)

	/*
	 * If we modified other callee-saveds, restore them now.
	 * This is rare, but could be via ptrace or signal handler.
	 */
	{
	 move   r10, zero
	 bbs    r20, .Lrestore_callees
	}
.Lcontinue_restore_regs:

	/* Check if we're returning from a syscall (sign bit of rotated flags). */
	{
	 move   r11, zero
	 blzt   r20, 1f  /* no, so go restore caller-save registers */
	}

	/*
	 * Check if we're returning to userspace.
	 * Note that if we're not, we don't worry about zeroing everything.
	 */
	{
	 addli  sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
	 bnz    lr, .Lkernel_return
	}

	/*
	 * On return from syscall, we've restored r0 from pt_regs, but we
	 * clear the remainder of the caller-saved registers.  We could
	 * restore the syscall arguments, but there's not much point,
	 * and it ensures user programs aren't trying to use the
	 * caller-saves if we clear them, as well as avoiding leaking
	 * kernel pointers into userspace.
	 */
	pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
	{
	 lw     sp, sp
	 move   r14, zero
	 move   r15, zero
	}
	{ move r16, zero; move r17, zero }
	{ move r18, zero; move r19, zero }
	{ move r20, zero; move r21, zero }
	{ move r22, zero; move r23, zero }
	{ move r24, zero; move r25, zero }
	{ move r26, zero; move r27, zero }
	{ move r28, zero; move r29, zero }
	iret

	/*
	 * Not a syscall, so restore caller-saved registers.
	 * First kick off a load for cache line 1, which we're touching
	 * for the first time here.
	 */
	.align 64
1:	pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29)
	pop_reg r1
	pop_reg r2
	pop_reg r3
	pop_reg r4
	pop_reg r5
	pop_reg r6
	pop_reg r7
	pop_reg r8
	pop_reg r9
	pop_reg r10
	pop_reg r11
	pop_reg r12
	pop_reg r13
	pop_reg r14
	pop_reg r15
	pop_reg r16
	pop_reg r17
	pop_reg r18
	pop_reg r19
	pop_reg r20
	pop_reg r21
	pop_reg r22
	pop_reg r23
	pop_reg r24
	pop_reg r25
	pop_reg r26
	pop_reg r27
	pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
	/* r29 already restored above */
	bnz     lr, .Lkernel_return
	pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
	lw      sp, sp
	iret

	/*
	 * We can't restore tp when in kernel mode, since a thread might
	 * have migrated from another cpu and brought a stale tp value.
	 */
.Lkernel_return:
	pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
	lw      sp, sp
	iret

	/* Restore callee-saved registers from r34 to r51. */
.Lrestore_callees:
	addli   sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
	pop_reg r34
	pop_reg r35
	pop_reg r36
	pop_reg r37
	pop_reg r38
	pop_reg r39
	pop_reg r40
	pop_reg r41
	pop_reg r42
	pop_reg r43
	pop_reg r44
	pop_reg r45
	pop_reg r46
	pop_reg r47
	pop_reg r48
	pop_reg r49
	pop_reg r50
	pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
	j .Lcontinue_restore_regs

.Lwork_pending:
	/* Mask the reschedule flag */
	andi    r28, r29, _TIF_NEED_RESCHED

	{
	 /*
	  * If the NEED_RESCHED flag is set, we call schedule(), which
	  * may drop this context right here and go do something else.
	  * On return, jump back to .Lresume_userspace and recheck.
	  */
	 bz     r28, .Lasync_tlb

	 /* Mask the async-tlb flag */
	 andi   r28, r29, _TIF_ASYNC_TLB
	}

	jal     schedule
	FEEDBACK_REENTER(interrupt_return)

	/* Reload the flags and check again */
	j       .Lresume_userspace

.Lasync_tlb:
	{
	 bz     r28, .Lneed_sigpending

	 /* Mask the sigpending flag */
	 andi   r28, r29, _TIF_SIGPENDING
	}

	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal     do_async_page_fault
	FEEDBACK_REENTER(interrupt_return)

	/*
	 * Go restart the "resume userspace" process.  We may have
	 * fired a signal, and we need to disable interrupts again.
	 */
	j       .Lresume_userspace

.Lneed_sigpending:
	/*
	 * At this point we are either doing signal handling or single-step,
	 * so either way make sure we have all the registers saved.
	 */
	push_extra_callee_saves r0

	{
	 /* If no signal pending, skip to singlestep check */
	 bz     r28, .Lneed_singlestep

	 /* Mask the singlestep flag */
	 andi   r28, r29, _TIF_SINGLESTEP
	}

	jal     do_signal
	FEEDBACK_REENTER(interrupt_return)

	/* Reload the flags and check again */
	j       .Lresume_userspace

.Lneed_singlestep:
	{
	 /* Get a pointer to the EX1 field */
	 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)

	 /* If we get here, our bit must be set. */
	 bz     r28, .Lwork_confusion
	}
	/* If we are in priv mode, don't single step */
	lw      r28, r29
	andi    r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
	bnz     r28, .Lrestore_all

	/* Allow interrupts within the single step code */
	TRACE_IRQS_ON /* Note: clobbers registers r0-r29 */
	IRQ_ENABLE(r20, r21)

	/* try to single-step the current instruction */
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal     single_step_once
	FEEDBACK_REENTER(interrupt_return)

	/* Re-disable interrupts.  TRACE_IRQS_OFF in .Lrestore_all. */
	IRQ_DISABLE(r20,r21)

	j       .Lrestore_all

.Lwork_confusion:
	move    r0, r28
	panic   "thread_info allwork flags unhandled on userspace resume: %#x"

	STD_ENDPROC(interrupt_return)
1190 | |||
	/*
	 * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit
	 * before returning, so we can properly get more downcalls.
	 */
	.pushsection .text.handle_interrupt_downcall,"ax"
handle_interrupt_downcall:
	finish_interrupt_save handle_interrupt_downcall
	check_single_stepping normal, .Ldispatch_downcall
.Ldispatch_downcall:

	/*
	 * Clear INTCTRL_1 from the set of interrupts we ever enable.
	 * NOTE(review): the "addi r30, r30, 4" appears to select the word
	 * of the enabled-mask that holds INT_INTCTRL_1's bit — confirm
	 * against the interrupt numbering and mask layout.
	 */
	GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
	{
	 addi   r30, r30, 4
	 movei  r31, INT_MASK(INT_INTCTRL_1)
	}
	{
	 lw     r20, r30
	 nor    r21, r31, zero   /* r21 = ~r31 (bitwise complement) */
	}
	and     r20, r20, r21
	sw      r30, r20

	/* Call the C handler with the pt_regs pointer in r0. */
	{
	 jalr   r0
	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	}
	FEEDBACK_REENTER(handle_interrupt_downcall)

	/* Allow INTCTRL_1 to be enabled next time we enable interrupts. */
	lw      r20, r30
	or      r20, r20, r31
	sw      r30, r20

	{
	 movei  r30, 0 /* not an NMI */
	 j      interrupt_return
	}
	STD_ENDPROC(handle_interrupt_downcall)
1230 | |||
	/*
	 * Some interrupts don't check for single stepping: same as
	 * handle_interrupt but without the check_single_stepping fixup.
	 */
	.pushsection .text.handle_interrupt_no_single_step,"ax"
handle_interrupt_no_single_step:
	finish_interrupt_save handle_interrupt_no_single_step
	/* Call the C handler (old r0) with the pt_regs pointer (new r0). */
	{
	 jalr   r0
	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	}
	FEEDBACK_REENTER(handle_interrupt_no_single_step)
	{
	 movei  r30, 0 /* not an NMI */
	 j      interrupt_return
	}
	STD_ENDPROC(handle_interrupt_no_single_step)
1247 | |||
	/*
	 * "NMI" interrupts mask ALL interrupts before calling the
	 * handler, and don't check thread flags, etc., on the way
	 * back out.  In general, the only things we do here for NMIs
	 * are the register save/restore, fixing the PC if we were
	 * doing single step, and the dataplane kernel-TLB management.
	 * We don't (for example) deal with start/stop of the sched tick.
	 *
	 * NOTE(review): interrupt_return expects r30 nonzero for the NMI
	 * path; r30 is not set here, so it is presumably established by
	 * the vector entry / finish_interrupt_save — confirm.
	 */
	.pushsection .text.handle_nmi,"ax"
handle_nmi:
	finish_interrupt_save handle_nmi
	check_single_stepping normal, .Ldispatch_nmi
.Ldispatch_nmi:
	/* Call the C handler (old r0) with the pt_regs pointer (new r0). */
	{
	 jalr   r0
	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	}
	FEEDBACK_REENTER(handle_nmi)
	j       interrupt_return
	STD_ENDPROC(handle_nmi)
1268 | |||
	/*
	 * Parallel code for syscalls to handle_interrupt.
	 */
	.pushsection .text.handle_syscall,"ax"
handle_syscall:
	finish_interrupt_save handle_syscall

	/*
	 * Check for if we are single stepping in user level.  If so, then
	 * we need to restore the PC.
	 */
	check_single_stepping syscall, .Ldispatch_syscall
.Ldispatch_syscall:

	/* Enable irqs. */
	TRACE_IRQS_ON
	IRQ_ENABLE(r20, r21)

	/*
	 * Bump the per-cpu counter for syscalls made on this tile.
	 * irq_stat is a per-cpu area indexed via tp.
	 */
	moveli  r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	auli    r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	add     r20, r20, tp
	lw      r21, r20
	addi    r21, r21, 1
	sw      r20, r21

	/* Trace syscalls, if requested (r31 keeps &thread_info->flags). */
	GET_THREAD_INFO(r31)
	addi    r31, r31, THREAD_INFO_FLAGS_OFFSET
	lw      r30, r31
	andi    r30, r30, _TIF_SYSCALL_TRACE
	bzt     r30, .Lrestore_syscall_regs
	jal     do_syscall_trace
	FEEDBACK_REENTER(handle_syscall)

	/*
	 * We always reload our registers from the stack at this
	 * point.  They might be valid, if we didn't build with
	 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
	 * doing syscall tracing, but there are enough cases now that it
	 * seems simplest just to do the reload unconditionally.
	 */
.Lrestore_syscall_regs:
	PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
	pop_reg r0, r11
	pop_reg r1, r11
	pop_reg r2, r11
	pop_reg r3, r11
	pop_reg r4, r11
	pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
	pop_reg TREG_SYSCALL_NR_NAME, r11

	/* Ensure that the syscall number is within the legal range. */
	moveli  r21, __NR_syscalls
	{
	 slt_u  r21, TREG_SYSCALL_NR_NAME, r21
	 moveli r20, lo16(sys_call_table)
	}
	{
	 bbns   r21, .Linvalid_syscall
	 auli   r20, r20, ha16(sys_call_table)
	}
	/* Index the table (s2a scales the syscall number by 4) and load. */
	s2a     r20, TREG_SYSCALL_NR_NAME, r20
	lw      r20, r20

	/* Jump to syscall handler; the label marks the return address. */
	jalr    r20; .Lhandle_syscall_link:
	FEEDBACK_REENTER(handle_syscall)

	/*
	 * Write our r0 onto the stack so it gets restored instead
	 * of whatever the user had there before.
	 */
	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
	sw      r29, r0

	/* Do syscall trace again, if requested. */
	lw      r30, r31
	andi    r30, r30, _TIF_SYSCALL_TRACE
	bzt     r30, 1f
	jal     do_syscall_trace
	FEEDBACK_REENTER(handle_syscall)
1:	j       .Lresume_userspace  /* jump into middle of interrupt_return */

.Linvalid_syscall:
	/* Report an invalid syscall back to the user program */
	{
	 PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
	 movei  r28, -ENOSYS
	}
	sw      r29, r28
	j       .Lresume_userspace  /* jump into middle of interrupt_return */
	STD_ENDPROC(handle_syscall)
1362 | |||
	/*
	 * Return the address for oprofile to suppress in backtraces:
	 * lnk captures the current PC, then the addli rewrites it into
	 * the address of .Lhandle_syscall_link (the syscall call site).
	 */
	STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
	lnk     r0
	{
	 addli  r0, r0, .Lhandle_syscall_link - .
	 jrp    lr
	}
	STD_ENDPROC(handle_syscall_link_address)
1371 | |||
	/*
	 * First code run by a newly forked task: notify the simulator,
	 * finish the scheduler's context-switch bookkeeping, then take
	 * the normal return-to-userspace path.
	 */
	STD_ENTRY(ret_from_fork)
	jal     sim_notify_fork
	jal     schedule_tail
	FEEDBACK_REENTER(ret_from_fork)
	j       .Lresume_userspace /* jump into middle of interrupt_return */
	STD_ENDPROC(ret_from_fork)
1378 | |||
	/*
	 * Code for ill interrupt.  When the illegal instruction is one of
	 * the markers placed in the single-step buffer, this completes the
	 * single step (fixing up the PC and any rewritten register) and
	 * delivers SIGTRAP; otherwise it dispatches to the normal C handler.
	 */
	.pushsection .text.handle_ill,"ax"
handle_ill:
	finish_interrupt_save handle_ill

	/*
	 * Check for if we are single stepping in user level.  If so, then
	 * we need to restore the PC.
	 * On fall-through: r29 = step_state, r28 = &pt_regs->pc,
	 * r27 = pt_regs->pc, r26 = step_state->buffer.
	 */
	check_single_stepping ill, .Ldispatch_normal_ill

	{
	 /* See if the PC is the 1st bundle in the buffer */
	 seq    r25, r27, r26

	 /* Point to the 2nd bundle in the buffer */
	 addi   r26, r26, 8
	}
	{
	 /* Point to the original pc */
	 addi   r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET

	 /* Branch if the PC is the 1st bundle in the buffer */
	 bnz    r25, 3f
	}
	{
	 /* See if the PC is the 2nd bundle of the buffer */
	 seq    r25, r27, r26

	 /* Set PC to next instruction */
	 addi   r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
	}
	{
	 /* Point to flags */
	 addi   r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET

	 /*
	  * bz reads the seq result from the previous bundle (intra-bundle
	  * reads see pre-bundle values), so this branches to the
	  * third-bundle case when the PC was NOT in the second bundle.
	  */
	 bz     r25, 2f
	}
	/* Load flags */
	lw      r25, r25
	{
	 /*
	  * Get the offset for the register to restore
	  * Note: the lower bound is 2, so we have implicit scaling by 4.
	  * No multiplication of the register number by the size of a register
	  * is needed.
	  */
	 mm     r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \
		SINGLESTEP_STATE_TARGET_UB

	 /* Mask Rewrite_LR */
	 andi   r25, r25, SINGLESTEP_STATE_MASK_UPDATE
	}
	{
	 addi   r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET

	 /* Don't rewrite temp register */
	 bz     r25, 3f
	}
	{
	 /* Get the temp value */
	 lw     r29, r29

	 /* Point to where the register is stored */
	 add    r27, r27, sp
	}

	/* Add in the C ABI save area size to the register offset */
	addi    r27, r27, C_ABI_SAVE_AREA_SIZE

	/* Restore the user's register with the temp value */
	sw      r27, r29
	j       3f

2:
	/* Must be in the third bundle */
	addi    r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET

3:
	/* set PC (from whichever step_state field r24 points at) and continue */
	lw      r26, r24
	sw      r28, r26

	/* Clear TIF_SINGLESTEP */
	GET_THREAD_INFO(r0)

	addi    r1, r0, THREAD_INFO_FLAGS_OFFSET
	{
	 lw     r2, r1
	 addi   r0, r0, THREAD_INFO_TASK_OFFSET  /* currently a no-op */
	}
	andi    r2, r2, ~_TIF_SINGLESTEP
	sw      r1, r2

	/* Issue a sigtrap */
	{
	 lw     r0, r0   /* indirect thru thread_info to get task_info*/
	 addi   r1, sp, C_ABI_SAVE_AREA_SIZE  /* put ptregs pointer into r1 */
	 move   r2, zero /* load error code into r2 */
	}

	jal     send_sigtrap    /* issue a SIGTRAP */
	FEEDBACK_REENTER(handle_ill)
	j       .Lresume_userspace /* jump into middle of interrupt_return */

.Ldispatch_normal_ill:
	/* Not single-stepping: call the C handler (old r0) with pt_regs. */
	{
	 jalr   r0
	 PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	}
	FEEDBACK_REENTER(handle_ill)
	{
	 movei  r30, 0 /* not an NMI */
	 j      interrupt_return
	}
	STD_ENDPROC(handle_ill)
1498 | |||
	/*
	 * A single canonical "bpt" (breakpoint) bundle, kept in rodata
	 * under its own label so other code can locate it by address.
	 */
	.pushsection .rodata, "a"
	.align 8
bpt_code:
	bpt
	ENDPROC(bpt_code)
	.popsection
1505 | |||
/* Various stub interrupt handlers and syscall handlers */

/*
 * Marshal the fault context (faulting PC, lr, sp, r52) into the
 * C argument registers r1-r4, carve out a C ABI save area, and
 * tail-call the C handler kernel_double_fault().  r0 (the first
 * argument) is expected to have been set up by the caller.
 */
STD_ENTRY_LOCAL(_kernel_double_fault)
	mfspr   r1, EX_CONTEXT_1_0
	move    r2, lr
	move    r3, sp
	move    r4, r52
	addi    sp, sp, -C_ABI_SAVE_AREA_SIZE
	j       kernel_double_fault
	STD_ENDPROC(_kernel_double_fault)
1516 | |||
/*
 * Catch-all for interrupts with no real handler: panic with the
 * interrupt number (r1, set up by the int_hand dispatch) and the
 * interrupted PC read from EX_CONTEXT_1_0.
 */
STD_ENTRY_LOCAL(bad_intr)
	mfspr   r2, EX_CONTEXT_1_0
	panic   "Unhandled interrupt %#x: PC %#lx"
	STD_ENDPROC(bad_intr)
1521 | |||
/* Put address of pt_regs in reg and jump. */
/*
 * Wrapper generator for syscalls whose C implementation (_##x) takes
 * a struct pt_regs * as an extra argument in register "reg".  The
 * pointer load and the jump issue in a single bundle.
 */
#define PTREGS_SYSCALL(x, reg)                          \
	STD_ENTRY(x);                                   \
	{                                               \
	 PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
	 j _##x                                         \
	};                                              \
	STD_ENDPROC(x)

PTREGS_SYSCALL(sys_execve, r3)
PTREGS_SYSCALL(sys_sigaltstack, r2)
PTREGS_SYSCALL(sys_rt_sigreturn, r0)
1534 | |||
/* Save additional callee-saves to pt_regs, put address in reg and jump. */
/*
 * Like PTREGS_SYSCALL, but first spills the extra callee-saved
 * registers into pt_regs (needed by syscalls such as fork/clone
 * that must capture the full register state).
 */
#define PTREGS_SYSCALL_ALL_REGS(x, reg)                 \
	STD_ENTRY(x);                                   \
	push_extra_callee_saves reg;                    \
	j _##x;                                         \
	STD_ENDPROC(x)

PTREGS_SYSCALL_ALL_REGS(sys_fork, r0)
PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0)
PTREGS_SYSCALL_ALL_REGS(sys_clone, r4)
PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1)
1546 | |||
1547 | /* | ||
1548 | * This entrypoint is taken for the cmpxchg and atomic_update fast | ||
1549 | * swints. We may wish to generalize it to other fast swints at some | ||
1550 | * point, but for now there are just two very similar ones, which | ||
1551 | * makes it faster. | ||
1552 | * | ||
1553 | * The fast swint code is designed to have a small footprint. It does | ||
1554 | * not save or restore any GPRs, counting on the caller-save registers | ||
1555 | * to be available to it on entry. It does not modify any callee-save | ||
1556 | * registers (including "lr"). It does not check what PL it is being | ||
1557 | * called at, so you'd better not call it other than at PL0. | ||
1558 | * | ||
1559 | * It does not use the stack, but since it might be re-interrupted by | ||
1560 | * a page fault which would assume the stack was valid, it does | ||
1561 | * save/restore the stack pointer and zero it out to make sure it gets reset. | ||
1562 | * Since we always keep interrupts disabled, the hypervisor won't | ||
1563 | * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them | ||
1564 | * (other than to advance the PC on return). | ||
1565 | * | ||
1566 | * We have to manually validate the user vs kernel address range | ||
1567 | * (since at PL1 we can read/write both), and for performance reasons | ||
1568 | * we don't allow cmpxchg on the fc000000 memory region, since we only | ||
1569 | * validate that the user address is below PAGE_OFFSET. | ||
1570 | * | ||
1571 | * We place it in the __HEAD section to ensure it is relatively | ||
1572 | * near to the intvec_SWINT_1 code (reachable by a conditional branch). | ||
1573 | * | ||
1574 | * Must match register usage in do_page_fault(). | ||
1575 | */ | ||
1576 | __HEAD | ||
1577 | .align 64 | ||
1578 | /* Align much later jump on the start of a cache line. */ | ||
1579 | #if !ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
1580 | nop; nop | ||
1581 | #endif | ||
1582 | ENTRY(sys_cmpxchg) | ||
1583 | |||
1584 | /* | ||
1585 | * Save "sp" and set it zero for any possible page fault. | ||
1586 | * | ||
1587 | * HACK: We want to both zero sp and check r0's alignment, | ||
1588 | * so we do both at once. If "sp" becomes nonzero we | ||
1589 | * know r0 is unaligned and branch to the error handler that | ||
1590 | * restores sp, so this is OK. | ||
1591 | * | ||
1592 | * ICS is disabled right now so having a garbage but nonzero | ||
1593 | * sp is OK, since we won't execute any faulting instructions | ||
1594 | * when it is nonzero. | ||
1595 | */ | ||
1596 | { | ||
1597 | move r27, sp | ||
1598 | andi sp, r0, 3 | ||
1599 | } | ||
1600 | |||
1601 | /* | ||
1602 | * Get the lock address in ATOMIC_LOCK_REG, and also validate that the | ||
1603 | * address is less than PAGE_OFFSET, since that won't trap at PL1. | ||
1604 | * We only use bits less than PAGE_SHIFT to avoid having to worry | ||
1605 | * about aliasing among multiple mappings of the same physical page, | ||
1606 | * and we ignore the low 3 bits so we have one lock that covers | ||
1607 | * both a cmpxchg64() and a cmpxchg() on either its low or high word. | ||
1608 | * NOTE: this code must match __atomic_hashed_lock() in lib/atomic.c. | ||
1609 | */ | ||
1610 | |||
1611 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
1612 | { | ||
1613 | /* Check for unaligned input. */ | ||
1614 | bnz sp, .Lcmpxchg_badaddr | ||
1615 | mm r25, r0, zero, 3, PAGE_SHIFT-1 | ||
1616 | } | ||
1617 | { | ||
1618 | crc32_32 r25, zero, r25 | ||
1619 | moveli r21, lo16(atomic_lock_ptr) | ||
1620 | } | ||
1621 | { | ||
1622 | auli r21, r21, ha16(atomic_lock_ptr) | ||
1623 | auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */ | ||
1624 | } | ||
1625 | { | ||
1626 | shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT | ||
1627 | slt_u r23, r0, r23 | ||
1628 | |||
1629 | /* | ||
1630 | * Ensure that the TLB is loaded before we take out the lock. | ||
1631 | * On TILEPro, this will start fetching the value all the way | ||
1632 | * into our L1 as well (and if it gets modified before we | ||
1633 | * grab the lock, it will be invalidated from our cache | ||
1634 | * before we reload it). On tile64, we'll start fetching it | ||
1635 | * into our L1 if we're the home, and if we're not, we'll | ||
1636 | * still at least start fetching it into the home's L2. | ||
1637 | */ | ||
1638 | lw r26, r0 | ||
1639 | } | ||
1640 | { | ||
1641 | s2a r21, r20, r21 | ||
1642 | bbns r23, .Lcmpxchg_badaddr | ||
1643 | } | ||
1644 | { | ||
1645 | lw r21, r21 | ||
1646 | seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64 | ||
1647 | andi r25, r25, ATOMIC_HASH_L2_SIZE - 1 | ||
1648 | } | ||
1649 | { | ||
1650 | /* Branch away at this point if we're doing a 64-bit cmpxchg. */ | ||
1651 | bbs r23, .Lcmpxchg64 | ||
1652 | andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */ | ||
1653 | } | ||
1654 | |||
1655 | { | ||
1656 | /* | ||
1657 | * We very carefully align the code that actually runs with | ||
1658 | * the lock held (nine bundles) so that we know it is all in | ||
1659 | * the icache when we start. This instruction (the jump) is | ||
1660 | * at the start of the first cache line, address zero mod 64; | ||
1661 | * we jump to somewhere in the second cache line to issue the | ||
1662 | * tns, then jump back to finish up. | ||
1663 | */ | ||
1664 | s2a ATOMIC_LOCK_REG_NAME, r25, r21 | ||
1665 | j .Lcmpxchg32_tns | ||
1666 | } | ||
1667 | |||
1668 | #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
1669 | { | ||
1670 | /* Check for unaligned input. */ | ||
1671 | bnz sp, .Lcmpxchg_badaddr | ||
1672 | auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */ | ||
1673 | } | ||
1674 | { | ||
1675 | /* | ||
1676 | * Slide bits into position for 'mm'. We want to ignore | ||
1677 | * the low 3 bits of r0, and consider only the next | ||
1678 | * ATOMIC_HASH_SHIFT bits. | ||
1679 | * Because of C pointer arithmetic, we want to compute this: | ||
1680 | * | ||
1681 | * ((char*)atomic_locks + | ||
1682 | * (((r0 >> 3) & (1 << (ATOMIC_HASH_SIZE - 1))) << 2)) | ||
1683 | * | ||
1684 | * Instead of two shifts we just ">> 1", and use 'mm' | ||
1685 | * to ignore the low and high bits we don't want. | ||
1686 | */ | ||
1687 | shri r25, r0, 1 | ||
1688 | |||
1689 | slt_u r23, r0, r23 | ||
1690 | |||
1691 | /* | ||
1692 | * Ensure that the TLB is loaded before we take out the lock. | ||
1693 | * On tilepro, this will start fetching the value all the way | ||
1694 | * into our L1 as well (and if it gets modified before we | ||
1695 | * grab the lock, it will be invalidated from our cache | ||
1696 | * before we reload it). On tile64, we'll start fetching it | ||
1697 | * into our L1 if we're the home, and if we're not, we'll | ||
1698 | * still at least start fetching it into the home's L2. | ||
1699 | */ | ||
1700 | lw r26, r0 | ||
1701 | } | ||
1702 | { | ||
1703 | /* atomic_locks is page aligned so this suffices to get its addr. */ | ||
1704 | auli r21, zero, hi16(atomic_locks) | ||
1705 | |||
1706 | bbns r23, .Lcmpxchg_badaddr | ||
1707 | } | ||
1708 | { | ||
1709 | /* | ||
1710 | * Insert the hash bits into the page-aligned pointer. | ||
1711 | * ATOMIC_HASH_SHIFT is so big that we don't actually hash | ||
1712 | * the unmasked address bits, as that may cause unnecessary | ||
1713 | * collisions. | ||
1714 | */ | ||
1715 | mm ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1 | ||
1716 | |||
1717 | seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64 | ||
1718 | } | ||
1719 | { | ||
1720 | /* Branch away at this point if we're doing a 64-bit cmpxchg. */ | ||
1721 | bbs r23, .Lcmpxchg64 | ||
1722 | andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */ | ||
1723 | } | ||
1724 | { | ||
1725 | /* | ||
1726 | * We very carefully align the code that actually runs with | ||
1727 | * the lock held (nine bundles) so that we know it is all in | ||
1728 | * the icache when we start. This instruction (the jump) is | ||
1729 | * at the start of the first cache line, address zero mod 64; | ||
1730 | * we jump to somewhere in the second cache line to issue the | ||
1731 | * tns, then jump back to finish up. | ||
1732 | */ | ||
1733 | j .Lcmpxchg32_tns | ||
1734 | } | ||
1735 | |||
1736 | #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
1737 | |||
ENTRY(__sys_cmpxchg_grab_lock)

	/*
	 * Perform the actual cmpxchg or atomic_update.
	 * Note that __futex_mark_unlocked() in uClibc relies on
	 * atomic_update() to always perform an "mf", so don't make
	 * it optional or conditional without modifying that code.
	 *
	 * On entry: r0 = user address, r1 = compare value (or mask for
	 * atomic_update), r2 = new value (or addend), r27 = saved user
	 * sp, r28 = return PC, and ATOMIC_LOCK_REG_NAME holds the lock
	 * we own.  r21 receives the old memory value and is returned
	 * to the user in r0.
	 */
.Ldo_cmpxchg32:
	{
	 lw	r21, r0
	 seqi	r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update
	 move	r24, r2
	}
	{
	 seq	r22, r21, r1	/* See if cmpxchg matches. */
	 and	r25, r21, r1	/* If atomic_update, compute (*mem & mask) */
	}
	{
	 or	r22, r22, r23	/* Skip compare branch for atomic_update. */
	 add	r25, r25, r2	/* Compute (*mem & mask) + addend. */
	}
	{
	 mvnz	r24, r23, r25	/* Use atomic_update value if appropriate. */
	 bbns	r22, .Lcmpxchg32_mismatch
	}
	sw	r0, r24

	/* Do slow mtspr here so the following "mf" waits less. */
	{
	 move	sp, r27
	 mtspr	EX_CONTEXT_1_0, r28
	}
	mf

	/* The following instruction is the start of the second cache line. */
	{
	 move	r0, r21		/* Return the old value to the caller. */
	 sw	ATOMIC_LOCK_REG_NAME, zero	/* Release the lock. */
	}
	iret

	/* Duplicated code here in the case where we don't overlap "mf" */
.Lcmpxchg32_mismatch:
	{
	 move	r0, r21
	 sw	ATOMIC_LOCK_REG_NAME, zero
	}
	{
	 move	sp, r27
	 mtspr	EX_CONTEXT_1_0, r28
	}
	iret
1791 | |||
1792 | /* | ||
1793 | * The locking code is the same for 32-bit cmpxchg/atomic_update, | ||
1794 | * and for 64-bit cmpxchg. We provide it as a macro and put | ||
1795 | * it into both versions. We can't share the code literally | ||
1796 | * since it depends on having the right branch-back address. | ||
1797 | * Note that the first few instructions should share the cache | ||
1798 | * line with the second half of the actual locked code. | ||
1799 | */ | ||
1800 | .macro cmpxchg_lock, bitwidth | ||
1801 | |||
1802 | /* Lock; if we succeed, jump back up to the read-modify-write. */ | ||
1803 | #ifdef CONFIG_SMP | ||
1804 | tns r21, ATOMIC_LOCK_REG_NAME | ||
1805 | #else | ||
1806 | /* | ||
1807 | * Non-SMP preserves all the lock infrastructure, to keep the | ||
1808 | * code simpler for the interesting (SMP) case. However, we do | ||
1809 | * one small optimization here and in atomic_asm.S, which is | ||
1810 | * to fake out acquiring the actual lock in the atomic_lock table. | ||
1811 | */ | ||
1812 | movei r21, 0 | ||
1813 | #endif | ||
1814 | |||
1815 | /* Issue the slow SPR here while the tns result is in flight. */ | ||
1816 | mfspr r28, EX_CONTEXT_1_0 | ||
1817 | |||
1818 | { | ||
1819 | addi r28, r28, 8 /* return to the instruction after the swint1 */ | ||
1820 | bzt r21, .Ldo_cmpxchg\bitwidth | ||
1821 | } | ||
1822 | /* | ||
1823 | * The preceding instruction is the last thing that must be | ||
1824 | * on the second cache line. | ||
1825 | */ | ||
1826 | |||
1827 | #ifdef CONFIG_SMP | ||
1828 | /* | ||
1829 | * We failed to acquire the tns lock on our first try. Now use | ||
1830 | * bounded exponential backoff to retry, like __atomic_spinlock(). | ||
1831 | */ | ||
1832 | { | ||
1833 | moveli r23, 2048 /* maximum backoff time in cycles */ | ||
1834 | moveli r25, 32 /* starting backoff time in cycles */ | ||
1835 | } | ||
1836 | 1: mfspr r26, CYCLE_LOW /* get start point for this backoff */ | ||
1837 | 2: mfspr r22, CYCLE_LOW /* test to see if we've backed off enough */ | ||
1838 | sub r22, r22, r26 | ||
1839 | slt r22, r22, r25 | ||
1840 | bbst r22, 2b | ||
1841 | { | ||
1842 | shli r25, r25, 1 /* double the backoff; retry the tns */ | ||
1843 | tns r21, ATOMIC_LOCK_REG_NAME | ||
1844 | } | ||
1845 | slt r26, r23, r25 /* is the proposed backoff too big? */ | ||
1846 | { | ||
1847 | mvnz r25, r26, r23 | ||
1848 | bzt r21, .Ldo_cmpxchg\bitwidth | ||
1849 | } | ||
1850 | j 1b | ||
1851 | #endif /* CONFIG_SMP */ | ||
1852 | .endm | ||
1853 | |||
.Lcmpxchg32_tns:
	cmpxchg_lock 32

	/*
	 * This code is invoked from sys_cmpxchg after most of the
	 * preconditions have been checked. We still need to check
	 * that r0 is 8-byte aligned, since if it's not we won't
	 * actually be atomic. However, ATOMIC_LOCK_REG has the atomic
	 * lock pointer and r27/r28 have the saved SP/PC.
	 * r23 is holding "r0 & 7" so we can test for alignment.
	 * The compare value is in r2/r3; the new value is in r4/r5.
	 * On return, we must put the old value in r0/r1.
	 */
	.align 64
.Lcmpxchg64:
	{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	 s2a	ATOMIC_LOCK_REG_NAME, r25, r21
#endif
	 bzt	r23, .Lcmpxchg64_tns	/* aligned: go take the lock */
	}
	j	.Lcmpxchg_badaddr

	/* Locked 64-bit read-compare-write, entered from cmpxchg_lock 64. */
.Ldo_cmpxchg64:
	{
	 lw	r21, r0			/* low word of old value */
	 addi	r25, r0, 4
	}
	{
	 lw	r1, r25			/* high word of old value */
	}
	seq	r26, r21, r2
	{
	 bz	r26, .Lcmpxchg64_mismatch
	 seq	r26, r1, r3
	}
	{
	 bz	r26, .Lcmpxchg64_mismatch
	}
	sw	r0, r4
	sw	r25, r5

	/*
	 * The 32-bit path provides optimized "match" and "mismatch"
	 * iret paths, but we don't have enough bundles in this cache line
	 * to do that, so we just make even the "mismatch" path do an "mf".
	 */
.Lcmpxchg64_mismatch:
	{
	 move	sp, r27
	 mtspr	EX_CONTEXT_1_0, r28
	}
	mf
	{
	 move	r0, r21
	 sw	ATOMIC_LOCK_REG_NAME, zero
	}
	iret

.Lcmpxchg64_tns:
	cmpxchg_lock 64


	/*
	 * Reset sp and revector to sys_cmpxchg_badaddr(), which will
	 * just raise the appropriate signal and exit. Doing it this
	 * way means we don't have to duplicate the code in intvec.S's
	 * int_hand macro that locates the top of the stack.
	 */
.Lcmpxchg_badaddr:
	{
	 moveli	TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr
	 move	sp, r27
	}
	j	intvec_SWINT_1
	ENDPROC(sys_cmpxchg)
	ENTRY(__sys_cmpxchg_end)
1931 | |||
1932 | |||
/* The single-step support may need to read all the registers. */
/* Spill the extra callee-saves into pt_regs, then go to do_trap. */
int_unalign:
	push_extra_callee_saves r0
	j       do_trap
1937 | |||
/* Include .intrpt1 array of interrupt vectors */
	.section ".intrpt1", "ax"

/* Interrupts with no dedicated handler fall through to bad_intr. */
#define op_handle_perf_interrupt bad_intr
#define op_handle_aux_perf_interrupt bad_intr

#ifndef CONFIG_HARDWALL
#define do_hardwall_trap bad_intr
#endif

	/*
	 * NOTE(review): the entries below appear to be listed in
	 * hardware interrupt-vector order (int_hand presumably emits
	 * each at its fixed vector offset) -- confirm before reordering.
	 */
	int_hand     INT_ITLB_MISS, ITLB_MISS, \
		     do_page_fault, handle_interrupt_no_single_step
	int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
	int_hand     INT_ILL, ILL, do_trap, handle_ill
	int_hand     INT_GPV, GPV, do_trap
	int_hand     INT_SN_ACCESS, SN_ACCESS, do_trap
	int_hand     INT_IDN_ACCESS, IDN_ACCESS, do_trap
	int_hand     INT_UDN_ACCESS, UDN_ACCESS, do_trap
	int_hand     INT_IDN_REFILL, IDN_REFILL, bad_intr
	int_hand     INT_UDN_REFILL, UDN_REFILL, bad_intr
	int_hand     INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
	int_hand     INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
	int_hand     INT_SWINT_3, SWINT_3, do_trap
	int_hand     INT_SWINT_2, SWINT_2, do_trap
	int_hand     INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
	int_hand     INT_SWINT_0, SWINT_0, do_trap
	int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
	int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
	int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
	int_hand     INT_DMATLB_MISS, DMATLB_MISS, do_page_fault
	int_hand     INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault
	int_hand     INT_SNITLB_MISS, SNITLB_MISS, do_page_fault
	int_hand     INT_SN_NOTIFY, SN_NOTIFY, bad_intr
	int_hand     INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap
	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
	int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
	int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
	int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
	int_hand     INT_UDN_TIMER, UDN_TIMER, bad_intr
	int_hand     INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr
	int_hand     INT_IDN_CA, IDN_CA, bad_intr
	int_hand     INT_UDN_CA, UDN_CA, bad_intr
	int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr
	int_hand     INT_UDN_AVAIL, UDN_AVAIL, bad_intr
	int_hand     INT_PERF_COUNT, PERF_COUNT, \
		     op_handle_perf_interrupt, handle_nmi
	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
		     hv_message_intr, handle_interrupt_downcall
	int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
		     tile_dev_intr, handle_interrupt_downcall
	int_hand     INT_I_ASID, I_ASID, bad_intr
	int_hand     INT_D_ASID, D_ASID, bad_intr
	int_hand     INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
		     do_page_fault, handle_interrupt_downcall
	int_hand     INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
		     do_page_fault, handle_interrupt_downcall
	int_hand     INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
		     do_page_fault, handle_interrupt_downcall
	int_hand     INT_SN_CPL, SN_CPL, bad_intr
	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
#if CHIP_HAS_AUX_PERF_COUNTERS()
	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
		     op_handle_aux_perf_interrupt, handle_nmi
#endif

	/* Synthetic interrupt delivered only by the simulator */
	int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c new file mode 100644 index 000000000000..596c60086930 --- /dev/null +++ b/arch/tile/kernel/irq.c | |||
@@ -0,0 +1,334 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/irq.h> | ||
19 | #include <linux/kernel_stat.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | #include <hv/drv_pcie_rc_intf.h> | ||
22 | #include <arch/spr_def.h> | ||
23 | #include <asm/traps.h> | ||
24 | |||
/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1

/*
 * The set of interrupts we enable for raw_local_irq_enable().
 * This is initialized to have just a single interrupt that the kernel
 * doesn't actually use as a sentinel.  During kernel init,
 * interrupts are added as the kernel gets prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
	INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define per-tile irq disable mask; the hardware/HV only has a single
 * mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
	____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure we enable newly
 * enabled IRQs before exiting the outermost interrupt.
 */
static DEFINE_PER_CPU(int, irq_depth);

/* State for allocating IRQs on Gx. */
#if CHIP_HAS_IPI()
static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE);
static DEFINE_SPINLOCK(available_irqs_lock);
#endif

/*
 * NOTE(review): these helpers act on the calling tile's own mask (the
 * SPR variants are per-tile); the HV variants presumably match -- confirm.
 */
#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_1, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_1, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_1, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif
73 | |||
74 | /* | ||
75 | * The interrupt handling path, implemented in terms of HV interrupt | ||
76 | * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx. | ||
77 | */ | ||
78 | void tile_dev_intr(struct pt_regs *regs, int intnum) | ||
79 | { | ||
80 | int depth = __get_cpu_var(irq_depth)++; | ||
81 | unsigned long original_irqs; | ||
82 | unsigned long remaining_irqs; | ||
83 | struct pt_regs *old_regs; | ||
84 | |||
85 | #if CHIP_HAS_IPI() | ||
86 | /* | ||
87 | * Pending interrupts are listed in an SPR. We might be | ||
88 | * nested, so be sure to only handle irqs that weren't already | ||
89 | * masked by a previous interrupt. Then, mask out the ones | ||
90 | * we're going to handle. | ||
91 | */ | ||
92 | unsigned long masked = __insn_mfspr(SPR_IPI_MASK_1); | ||
93 | original_irqs = __insn_mfspr(SPR_IPI_EVENT_1) & ~masked; | ||
94 | __insn_mtspr(SPR_IPI_MASK_SET_1, original_irqs); | ||
95 | #else | ||
96 | /* | ||
97 | * Hypervisor performs the equivalent of the Gx code above and | ||
98 | * then puts the pending interrupt mask into a system save reg | ||
99 | * for us to find. | ||
100 | */ | ||
101 | original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_1_3); | ||
102 | #endif | ||
103 | remaining_irqs = original_irqs; | ||
104 | |||
105 | /* Track time spent here in an interrupt context. */ | ||
106 | old_regs = set_irq_regs(regs); | ||
107 | irq_enter(); | ||
108 | |||
109 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
110 | /* Debugging check for stack overflow: less than 1/8th stack free? */ | ||
111 | { | ||
112 | long sp = stack_pointer - (long) current_thread_info(); | ||
113 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { | ||
114 | pr_emerg("tile_dev_intr: " | ||
115 | "stack overflow: %ld\n", | ||
116 | sp - sizeof(struct thread_info)); | ||
117 | dump_stack(); | ||
118 | } | ||
119 | } | ||
120 | #endif | ||
121 | while (remaining_irqs) { | ||
122 | unsigned long irq = __ffs(remaining_irqs); | ||
123 | remaining_irqs &= ~(1UL << irq); | ||
124 | |||
125 | /* Count device irqs; Linux IPIs are counted elsewhere. */ | ||
126 | if (irq != IRQ_RESCHEDULE) | ||
127 | __get_cpu_var(irq_stat).irq_dev_intr_count++; | ||
128 | |||
129 | generic_handle_irq(irq); | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * If we weren't nested, turn on all enabled interrupts, | ||
134 | * including any that were reenabled during interrupt | ||
135 | * handling. | ||
136 | */ | ||
137 | if (depth == 0) | ||
138 | unmask_irqs(~__get_cpu_var(irq_disable_mask)); | ||
139 | |||
140 | __get_cpu_var(irq_depth)--; | ||
141 | |||
142 | /* | ||
143 | * Track time spent against the current process again and | ||
144 | * process any softirqs if they are waiting. | ||
145 | */ | ||
146 | irq_exit(); | ||
147 | set_irq_regs(old_regs); | ||
148 | } | ||
149 | |||
150 | |||
151 | /* | ||
152 | * Remove an irq from the disabled mask. If we're in an interrupt | ||
153 | * context, defer enabling the HW interrupt until we leave. | ||
154 | */ | ||
155 | void enable_percpu_irq(unsigned int irq) | ||
156 | { | ||
157 | get_cpu_var(irq_disable_mask) &= ~(1UL << irq); | ||
158 | if (__get_cpu_var(irq_depth) == 0) | ||
159 | unmask_irqs(1UL << irq); | ||
160 | put_cpu_var(irq_disable_mask); | ||
161 | } | ||
162 | EXPORT_SYMBOL(enable_percpu_irq); | ||
163 | |||
164 | /* | ||
165 | * Add an irq to the disabled mask. We disable the HW interrupt | ||
166 | * immediately so that there's no possibility of it firing. If we're | ||
167 | * in an interrupt context, the return path is careful to avoid | ||
168 | * unmasking a newly disabled interrupt. | ||
169 | */ | ||
170 | void disable_percpu_irq(unsigned int irq) | ||
171 | { | ||
172 | get_cpu_var(irq_disable_mask) |= (1UL << irq); | ||
173 | mask_irqs(1UL << irq); | ||
174 | put_cpu_var(irq_disable_mask); | ||
175 | } | ||
176 | EXPORT_SYMBOL(disable_percpu_irq); | ||
177 | |||
/* Mask an interrupt (irq_chip .mask callback). */
static void tile_irq_chip_mask(unsigned int irq)
{
	mask_irqs(1UL << irq);
}
183 | |||
/* Unmask an interrupt (irq_chip .unmask callback). */
static void tile_irq_chip_unmask(unsigned int irq)
{
	unmask_irqs(1UL << irq);
}
189 | |||
190 | /* | ||
191 | * Clear an interrupt before processing it so that any new assertions | ||
192 | * will trigger another irq. | ||
193 | */ | ||
194 | static void tile_irq_chip_ack(unsigned int irq) | ||
195 | { | ||
196 | if ((unsigned long)get_irq_chip_data(irq) != IS_HW_CLEARED) | ||
197 | clear_irqs(1UL << irq); | ||
198 | } | ||
199 | |||
200 | /* | ||
201 | * For per-cpu interrupts, we need to avoid unmasking any interrupts | ||
202 | * that we disabled via disable_percpu_irq(). | ||
203 | */ | ||
204 | static void tile_irq_chip_eoi(unsigned int irq) | ||
205 | { | ||
206 | if (!(__get_cpu_var(irq_disable_mask) & (1UL << irq))) | ||
207 | unmask_irqs(1UL << irq); | ||
208 | } | ||
209 | |||
/* The single irq_chip shared by all tile device interrupts. */
static struct irq_chip tile_irq_chip = {
	.typename = "tile_irq_chip",
	.ack = tile_irq_chip_ack,
	.eoi = tile_irq_chip_eoi,
	.mask = tile_irq_chip_mask,
	.unmask = tile_irq_chip_unmask,
};
217 | |||
/* Arch hook called once at boot to set up the IPI infrastructure. */
void __init init_IRQ(void)
{
	ipi_init();
}
222 | |||
/* Per-cpu bringup: enable interrupt delivery on the calling tile. */
void __cpuinit setup_irq_regs(void)
{
	/* Enable interrupt delivery. */
	unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
	raw_local_irq_unmask(INT_IPI_1);
#endif
}
231 | |||
232 | void tile_irq_activate(unsigned int irq, int tile_irq_type) | ||
233 | { | ||
234 | /* | ||
235 | * We use handle_level_irq() by default because the pending | ||
236 | * interrupt vector (whether modeled by the HV on TILE64 and | ||
237 | * TILEPro or implemented in hardware on TILE-Gx) has | ||
238 | * level-style semantics for each bit. An interrupt fires | ||
239 | * whenever a bit is high, not just at edges. | ||
240 | */ | ||
241 | irq_flow_handler_t handle = handle_level_irq; | ||
242 | if (tile_irq_type == TILE_IRQ_PERCPU) | ||
243 | handle = handle_percpu_irq; | ||
244 | set_irq_chip_and_handler(irq, &tile_irq_chip, handle); | ||
245 | |||
246 | /* | ||
247 | * Flag interrupts that are hardware-cleared so that ack() | ||
248 | * won't clear them. | ||
249 | */ | ||
250 | if (tile_irq_type == TILE_IRQ_HW_CLEAR) | ||
251 | set_irq_chip_data(irq, (void *)IS_HW_CLEARED); | ||
252 | } | ||
253 | EXPORT_SYMBOL(tile_irq_activate); | ||
254 | |||
255 | |||
/* Called by the generic irq layer for an irq with no handler. */
void ack_bad_irq(unsigned int irq)
{
	pr_err("unexpected IRQ trap at vector %02x\n", irq);
}
260 | |||
261 | /* | ||
262 | * Generic, controller-independent functions: | ||
263 | */ | ||
264 | |||
265 | int show_interrupts(struct seq_file *p, void *v) | ||
266 | { | ||
267 | int i = *(loff_t *) v, j; | ||
268 | struct irqaction *action; | ||
269 | unsigned long flags; | ||
270 | |||
271 | if (i == 0) { | ||
272 | seq_printf(p, " "); | ||
273 | for (j = 0; j < NR_CPUS; j++) | ||
274 | if (cpu_online(j)) | ||
275 | seq_printf(p, "CPU%-8d", j); | ||
276 | seq_putc(p, '\n'); | ||
277 | } | ||
278 | |||
279 | if (i < NR_IRQS) { | ||
280 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
281 | action = irq_desc[i].action; | ||
282 | if (!action) | ||
283 | goto skip; | ||
284 | seq_printf(p, "%3d: ", i); | ||
285 | #ifndef CONFIG_SMP | ||
286 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
287 | #else | ||
288 | for_each_online_cpu(j) | ||
289 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
290 | #endif | ||
291 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | ||
292 | seq_printf(p, " %s", action->name); | ||
293 | |||
294 | for (action = action->next; action; action = action->next) | ||
295 | seq_printf(p, ", %s", action->name); | ||
296 | |||
297 | seq_putc(p, '\n'); | ||
298 | skip: | ||
299 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
300 | } | ||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | #if CHIP_HAS_IPI() | ||
305 | int create_irq(void) | ||
306 | { | ||
307 | unsigned long flags; | ||
308 | int result; | ||
309 | |||
310 | spin_lock_irqsave(&available_irqs_lock, flags); | ||
311 | if (available_irqs == 0) | ||
312 | result = -ENOMEM; | ||
313 | else { | ||
314 | result = __ffs(available_irqs); | ||
315 | available_irqs &= ~(1UL << result); | ||
316 | dynamic_irq_init(result); | ||
317 | } | ||
318 | spin_unlock_irqrestore(&available_irqs_lock, flags); | ||
319 | |||
320 | return result; | ||
321 | } | ||
322 | EXPORT_SYMBOL(create_irq); | ||
323 | |||
/* Return an irq allocated by create_irq() to the available pool. */
void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&available_irqs_lock, flags);
	available_irqs |= (1UL << irq);
	dynamic_irq_cleanup(irq);
	spin_unlock_irqrestore(&available_irqs_lock, flags);
}
EXPORT_SYMBOL(destroy_irq);
334 | #endif | ||
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c new file mode 100644 index 000000000000..ba7a265d6179 --- /dev/null +++ b/arch/tile/kernel/machine_kexec.c | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * based on machine_kexec.c from other architectures in linux-2.6.18 | ||
15 | */ | ||
16 | |||
17 | #include <linux/mm.h> | ||
18 | #include <linux/kexec.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/reboot.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/vmalloc.h> | ||
23 | #include <linux/cpumask.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/elf.h> | ||
26 | #include <linux/highmem.h> | ||
27 | #include <linux/mmu_context.h> | ||
28 | #include <linux/io.h> | ||
29 | #include <linux/timex.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/pgalloc.h> | ||
32 | #include <asm/cacheflush.h> | ||
33 | #include <asm/checksum.h> | ||
34 | #include <hv/hypervisor.h> | ||
35 | |||
36 | |||
37 | /* | ||
38 | * This stuff is not in elf.h and is not in any other kernel include. | ||
39 | * This stuff is needed below in the little boot notes parser to | ||
40 | * extract the command line so we can pass it to the hypervisor. | ||
41 | */ | ||
struct Elf32_Bhdr {
	Elf32_Word b_signature;	/* must equal ELF_BOOT_MAGIC */
	Elf32_Word b_size;	/* total size of the boot-notes block, bytes */
	Elf32_Half b_checksum;	/* Internet checksum; whole block sums to 0 */
	Elf32_Half b_records;	/* presumably the note count — unused here */
};
#define ELF_BOOT_MAGIC 0x0E1FB007	/* expected b_signature value */
#define EBN_COMMAND_LINE 0x00000004	/* note type carrying the command line */
#define roundupsz(X) (((X) + 3) & ~3)	/* round up to 4-byte note alignment */
51 | |||
52 | /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ | ||
53 | |||
54 | |||
/* Prepare the system for a normal (non-crash) kexec reboot. */
void machine_shutdown(void)
{
	/*
	 * Normally we would stop all the other processors here, but
	 * the check in machine_kexec_prepare below ensures we'll only
	 * get this far if we've been booted with "nosmp" on the
	 * command line or without CONFIG_SMP so there's nothing to do
	 * here (for now).
	 */
}
65 | |||
/* Crash-kexec shutdown hook; intentionally empty (see comment). */
void machine_crash_shutdown(struct pt_regs *regs)
{
	/*
	 * Cannot happen. This type of kexec is disabled on this
	 * architecture (and enforced in machine_kexec_prepare below).
	 */
}
73 | |||
74 | |||
75 | int machine_kexec_prepare(struct kimage *image) | ||
76 | { | ||
77 | if (num_online_cpus() > 1) { | ||
78 | pr_warning("%s: detected attempt to kexec " | ||
79 | "with num_online_cpus() > 1\n", | ||
80 | __func__); | ||
81 | return -ENOSYS; | ||
82 | } | ||
83 | if (image->type != KEXEC_TYPE_DEFAULT) { | ||
84 | pr_warning("%s: detected attempt to kexec " | ||
85 | "with unsupported type: %d\n", | ||
86 | __func__, | ||
87 | image->type); | ||
88 | return -ENOSYS; | ||
89 | } | ||
90 | return 0; | ||
91 | } | ||
92 | |||
/* Undo machine_kexec_prepare; nothing was allocated there. */
void machine_kexec_cleanup(struct kimage *image)
{
	/*
	 * We did nothing in machine_kexec_prepare,
	 * so we have nothing to do here.
	 */
}
100 | |||
101 | /* | ||
102 | * If we can find elf boot notes on this page, return the command | ||
103 | * line. Otherwise, silently return null. Somewhat kludgy, but no | ||
104 | * good way to do this without significantly rearchitecting the | ||
105 | * architecture-independent kexec code. | ||
106 | */ | ||
107 | |||
static unsigned char *kexec_bn2cl(void *pg)
{
	struct Elf32_Bhdr *bhdrp;
	Elf32_Nhdr *nhdrp;
	unsigned char *desc;
	unsigned char *command_line;
	__sum16 csum;

	bhdrp = (struct Elf32_Bhdr *) pg;

	/*
	 * This routine is invoked for every source page, so make
	 * sure to quietly ignore every impossible page.
	 */
	if (bhdrp->b_signature != ELF_BOOT_MAGIC ||
	    bhdrp->b_size > PAGE_SIZE)
		return 0;

	/*
	 * If we get a checksum mismatch, warn with the checksum
	 * so we can diagnose better.
	 */
	csum = ip_compute_csum(pg, bhdrp->b_size);
	if (csum != 0) {
		pr_warning("%s: bad checksum %#x (size %d)\n",
			   __func__, csum, bhdrp->b_size);
		return 0;
	}

	/* First note record starts right after the boot-notes header. */
	nhdrp = (Elf32_Nhdr *) (bhdrp + 1);

	/*
	 * Walk the notes looking for the command-line note.
	 * NOTE(review): this skips only the (4-byte rounded) descriptor,
	 * i.e. it assumes n_namesz == 0 for these boot notes — confirm
	 * against the tool that generates them.
	 */
	while (nhdrp->n_type != EBN_COMMAND_LINE) {

		desc = (unsigned char *) (nhdrp + 1);
		desc += roundupsz(nhdrp->n_descsz);

		nhdrp = (Elf32_Nhdr *) desc;

		/* still in bounds? */
		if ((unsigned char *) (nhdrp + 1) >
		    ((unsigned char *) pg) + bhdrp->b_size) {

			pr_info("%s: out of bounds\n", __func__);
			return 0;
		}
	}

	/* The command line is the payload of the note we found. */
	command_line = (unsigned char *) (nhdrp + 1);
	desc = command_line;

	/*
	 * Verify the string is NUL-terminated before leaving this page;
	 * the page-base comparison assumes "pg" is page-aligned (it is a
	 * kmap'd page in the caller).
	 */
	while (*desc != '\0') {
		desc++;
		if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
			pr_info("%s: ran off end of page\n",
				__func__);
			return 0;
		}
	}

	return command_line;
}
169 | |||
/*
 * Scan the kimage's source pages for ELF boot notes and, if a command
 * line is found, hand it to the hypervisor for the new kernel
 * (otherwise set an empty command line).
 */
static void kexec_find_and_set_command_line(struct kimage *image)
{
	kimage_entry_t *ptr, entry;

	unsigned char *command_line = 0;
	unsigned char *r;
	HV_Errno hverr;

	/*
	 * Walk the kimage entry list: an IND_INDIRECTION entry points at
	 * the next page of entries; otherwise step to the next slot.
	 */
	for (ptr = &image->head;
	     (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
		     phys_to_virt((entry & PAGE_MASK)) : ptr + 1) {

		if ((entry & IND_SOURCE)) {
			void *va =
				kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
			r = kexec_bn2cl(va);
			if (r) {
				/*
				 * Found it: deliberately leave this page
				 * mapped so command_line stays valid for
				 * the hypervisor call below.
				 */
				command_line = r;
				break;
			}
			kunmap_atomic(va, KM_USER0);
		}
	}

	if (command_line != 0) {
		pr_info("setting new command line to \"%s\"\n",
			command_line);

		hverr = hv_set_command_line(
			(HV_VirtAddr) command_line, strlen(command_line));
		/*
		 * NOTE(review): command_line points into the middle of the
		 * page mapped above, not at "va" itself — confirm
		 * kunmap_atomic tolerates a mid-page pointer on tile.
		 */
		kunmap_atomic(command_line, KM_USER0);
	} else {
		pr_info("%s: no command line found; making empty\n",
			__func__);
		hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
	}
	if (hverr)
		pr_warning("%s: hv_set_command_line returned error: %d\n",
			   __func__, hverr);
}
211 | |||
212 | /* | ||
213 | * The kexec code range-checks all its PAs, so to avoid having it run | ||
214 | * amok and allocate memory and then sequester it from every other | ||
215 | * controller, we force it to come from controller zero. We also | ||
216 | * disable the oom-killer since if we do end up running out of memory, | ||
217 | * that almost certainly won't help. | ||
218 | */ | ||
219 | struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order) | ||
220 | { | ||
221 | gfp_mask |= __GFP_THISNODE | __GFP_NORETRY; | ||
222 | return alloc_pages_node(0, gfp_mask, order); | ||
223 | } | ||
224 | |||
/*
 * Rewrite the current top-level page table so that virtual addresses
 * below PAGE_OFFSET map 1:1 to physical addresses, using huge kernel
 * pages with L3 caching disabled, so the relocation stub can run with
 * VA == PA.
 */
static void setup_quasi_va_is_pa(void)
{
	HV_PTE *pgtable;
	HV_PTE pte;
	int i;

	/*
	 * Flush our TLB to prevent conflicts between the previous contents
	 * and the new stuff we're about to add.
	 */
	local_flush_tlb_all();

	/* setup VA is PA, at least up to PAGE_OFFSET */

	pgtable = (HV_PTE *)current->mm->pgd;
	pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);

	/* One identity huge-page PTE per top-level slot below PAGE_OFFSET. */
	for (i = 0; i < pgd_index(PAGE_OFFSET); i++)
		pgtable[i] = pfn_pte(i << (HPAGE_SHIFT - PAGE_SHIFT), pte);
}
246 | |||
247 | |||
248 | NORET_TYPE void machine_kexec(struct kimage *image) | ||
249 | { | ||
250 | void *reboot_code_buffer; | ||
251 | NORET_TYPE void (*rnk)(unsigned long, void *, unsigned long) | ||
252 | ATTRIB_NORET; | ||
253 | |||
254 | /* Mask all interrupts before starting to reboot. */ | ||
255 | interrupt_mask_set_mask(~0ULL); | ||
256 | |||
257 | kexec_find_and_set_command_line(image); | ||
258 | |||
259 | /* | ||
260 | * Adjust the home caching of the control page to be cached on | ||
261 | * this cpu, and copy the assembly helper into the control | ||
262 | * code page, which we map in the vmalloc area. | ||
263 | */ | ||
264 | homecache_change_page_home(image->control_code_page, 0, | ||
265 | smp_processor_id()); | ||
266 | reboot_code_buffer = vmap(&image->control_code_page, 1, 0, | ||
267 | __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE)); | ||
268 | memcpy(reboot_code_buffer, relocate_new_kernel, | ||
269 | relocate_new_kernel_size); | ||
270 | __flush_icache_range( | ||
271 | (unsigned long) reboot_code_buffer, | ||
272 | (unsigned long) reboot_code_buffer + relocate_new_kernel_size); | ||
273 | |||
274 | setup_quasi_va_is_pa(); | ||
275 | |||
276 | /* now call it */ | ||
277 | rnk = reboot_code_buffer; | ||
278 | (*rnk)(image->head, reboot_code_buffer, image->start); | ||
279 | } | ||
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c new file mode 100644 index 000000000000..6d23ed271d10 --- /dev/null +++ b/arch/tile/kernel/messaging.c | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/percpu.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/hardirq.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <asm/hv_driver.h> | ||
20 | #include <asm/irq_regs.h> | ||
21 | #include <asm/traps.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | #include <arch/interrupts.h> | ||
24 | |||
25 | /* All messages are stored here */ | ||
26 | static DEFINE_PER_CPU(HV_MsgState, msg_state); | ||
27 | |||
28 | void __cpuinit init_messaging(void) | ||
29 | { | ||
30 | /* Allocate storage for messages in kernel space */ | ||
31 | HV_MsgState *state = &__get_cpu_var(msg_state); | ||
32 | int rc = hv_register_message_state(state); | ||
33 | if (rc != HV_OK) | ||
34 | panic("hv_register_message_state: error %d", rc); | ||
35 | |||
36 | /* Make sure downcall interrupts will be enabled. */ | ||
37 | raw_local_irq_unmask(INT_INTCTRL_1); | ||
38 | } | ||
39 | |||
/*
 * Hypervisor message-downcall handler: drain every pending message for
 * this cpu and dispatch each one — IPI tags from other tiles, or
 * driver interrupt callbacks forwarded by the hypervisor.
 */
void hv_message_intr(struct pt_regs *regs, int intnum)
{
	/*
	 * We enter with interrupts disabled and leave them disabled,
	 * to match expectations of called functions (e.g.
	 * do_ccupdate_local() in mm/slab.c). This is also consistent
	 * with normal call entry for device interrupts.
	 */

	int message[HV_MAX_MESSAGE_SIZE/sizeof(int)];
	HV_RcvMsgInfo rmi;
	int nmsgs = 0;

	/* Track time spent here in an interrupt context */
	struct pt_regs *old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("hv_message_intr: "
			       "stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	/* Drain the queue; msglen == 0 means no more messages. */
	while (1) {
		rmi = hv_receive_message(__get_cpu_var(msg_state),
					 (HV_VirtAddr) message,
					 sizeof(message));
		if (rmi.msglen == 0)
			break;

		/* A negative length is an unrecoverable hypervisor error. */
		if (rmi.msglen < 0)
			panic("hv_receive_message failed: %d", rmi.msglen);

		++nmsgs;

		if (rmi.source == HV_MSG_TILE) {
			int tag;

			/* we just send tags for now */
			BUG_ON(rmi.msglen != sizeof(int));

			tag = message[0];
#ifdef CONFIG_SMP
			evaluate_message(message[0]);
#else
			panic("Received IPI message %d in UP mode", tag);
#endif
		} else if (rmi.source == HV_MSG_INTR) {
			/* Dispatch a hypervisor-driver interrupt callback. */
			HV_IntrMsg *him = (HV_IntrMsg *)message;
			struct hv_driver_cb *cb =
				(struct hv_driver_cb *)him->intarg;
			cb->callback(cb, him->intdata);
			__get_cpu_var(irq_stat).irq_hv_msg_count++;
		}
	}

	/*
	 * We shouldn't have gotten a message downcall with no
	 * messages available.
	 */
	if (nmsgs == 0)
		panic("Message downcall invoked with no messages!");

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c new file mode 100644 index 000000000000..e2ab82b7c7e7 --- /dev/null +++ b/arch/tile/kernel/module.c | |||
@@ -0,0 +1,257 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Based on i386 version, copyright (C) 2001 Rusty Russell. | ||
15 | */ | ||
16 | |||
17 | #include <linux/moduleloader.h> | ||
18 | #include <linux/elf.h> | ||
19 | #include <linux/vmalloc.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <asm/opcode-tile.h> | ||
24 | #include <asm/pgtable.h> | ||
25 | |||
26 | #ifdef __tilegx__ | ||
27 | # define Elf_Rela Elf64_Rela | ||
28 | # define ELF_R_SYM ELF64_R_SYM | ||
29 | # define ELF_R_TYPE ELF64_R_TYPE | ||
30 | #else | ||
31 | # define Elf_Rela Elf32_Rela | ||
32 | # define ELF_R_SYM ELF32_R_SYM | ||
33 | # define ELF_R_TYPE ELF32_R_TYPE | ||
34 | #endif | ||
35 | |||
36 | #ifdef MODULE_DEBUG | ||
37 | #define DEBUGP printk | ||
38 | #else | ||
39 | #define DEBUGP(fmt...) | ||
40 | #endif | ||
41 | |||
42 | /* | ||
43 | * Allocate some address space in the range MEM_MODULE_START to | ||
44 | * MEM_MODULE_END and populate it with memory. | ||
45 | */ | ||
46 | void *module_alloc(unsigned long size) | ||
47 | { | ||
48 | struct page **pages; | ||
49 | pgprot_t prot_rwx = __pgprot(_PAGE_KERNEL | _PAGE_KERNEL_EXEC); | ||
50 | struct vm_struct *area; | ||
51 | int i = 0; | ||
52 | int npages; | ||
53 | |||
54 | if (size == 0) | ||
55 | return NULL; | ||
56 | npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; | ||
57 | pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); | ||
58 | if (pages == NULL) | ||
59 | return NULL; | ||
60 | for (; i < npages; ++i) { | ||
61 | pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | ||
62 | if (!pages[i]) | ||
63 | goto error; | ||
64 | } | ||
65 | |||
66 | area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END); | ||
67 | if (!area) | ||
68 | goto error; | ||
69 | |||
70 | if (map_vm_area(area, prot_rwx, &pages)) { | ||
71 | vunmap(area->addr); | ||
72 | goto error; | ||
73 | } | ||
74 | |||
75 | return area->addr; | ||
76 | |||
77 | error: | ||
78 | while (--i >= 0) | ||
79 | __free_page(pages[i]); | ||
80 | kfree(pages); | ||
81 | return NULL; | ||
82 | } | ||
83 | |||
84 | |||
85 | /* Free memory returned from module_alloc */ | ||
void module_free(struct module *mod, void *module_region)
{
	/*
	 * NOTE(review): module_alloc() maps its pages via map_vm_area()
	 * without recording them in the vm area, so verify that vfree()
	 * actually releases the underlying pages and the page array
	 * here — it may only tear down the mapping.
	 */
	vfree(module_region);
	/*
	 * FIXME: If module_region == mod->init_region, trim exception
	 * table entries.
	 */
}
94 | |||
95 | /* We don't need anything special. */ | ||
int module_frob_arch_sections(Elf_Ehdr *hdr,
			      Elf_Shdr *sechdrs,
			      char *secstrings,
			      struct module *mod)
{
	/* No arch-specific section manipulation is required on tile. */
	return 0;
}
103 | |||
/*
 * REL-format relocations are not supported on tile; modules must use
 * RELA sections, handled by apply_relocate_add() below.
 */
int apply_relocate(Elf_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	pr_err("module %s: .rel relocation unsupported\n", me->name);
	return -ENOEXEC;
}
113 | |||
114 | #ifdef __tilegx__ | ||
115 | /* | ||
116 | * Validate that the high 16 bits of "value" is just the sign-extension of | ||
117 | * the low 48 bits. | ||
118 | */ | ||
119 | static int validate_hw2_last(long value, struct module *me) | ||
120 | { | ||
121 | if (((value << 16) >> 16) != value) { | ||
122 | pr_warning("module %s: Out of range HW2_LAST value %#lx\n", | ||
123 | me->name, value); | ||
124 | return 0; | ||
125 | } | ||
126 | return 1; | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Validate that "value" isn't too big to hold in a JumpOff relocation. | ||
131 | */ | ||
static int validate_jumpoff(long value)
{
	/* Determine size of jump offset. */
	int shift = __builtin_clzl(get_JumpOff_X1(create_JumpOff_X1(-1)));

	/* Check to see if it fits into the relocation slot. */
	long f = get_JumpOff_X1(create_JumpOff_X1(value));
	/* Sign-extend the truncated field; valid iff it round-trips. */
	f = (f << shift) >> shift;

	return f == value;
}
143 | #endif | ||
144 | |||
/*
 * Apply a RELA relocation section to a module being loaded.
 * Returns 0 on success, -ENOEXEC on an out-of-range or unknown
 * relocation.
 */
int apply_relocate_add(Elf_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf_Sym *sym;
	u64 *location;
	unsigned long value;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/*
		 * This is the symbol it is referring to.
		 * Note that all undefined symbols have been resolved.
		 */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF_R_SYM(rel[i].r_info);
		value = sym->st_value + rel[i].r_addend;

		switch (ELF_R_TYPE(rel[i].r_info)) {

/* Clear the target bitfield in *location, then OR in the new value. */
#define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value)))

#ifndef __tilegx__
		case R_TILE_32:
			*(uint32_t *)location = value;
			break;
		case R_TILE_IMM16_X0_HA:
			value = (value + 0x8000) >> 16;
			/*FALLTHROUGH*/
		case R_TILE_IMM16_X0_LO:
			MUNGE(create_Imm16_X0);
			break;
		case R_TILE_IMM16_X1_HA:
			value = (value + 0x8000) >> 16;
			/*FALLTHROUGH*/
		case R_TILE_IMM16_X1_LO:
			MUNGE(create_Imm16_X1);
			break;
		case R_TILE_JOFFLONG_X1:
			value -= (unsigned long) location;  /* pc-relative */
			value = (long) value >> 3;     /* count by instrs */
			MUNGE(create_JOffLong_X1);
			break;
#else
		case R_TILEGX_64:
			*location = value;
			break;
		/* HW2_LAST/HW1/HW0 chains: each level shifts off 16 bits. */
		case R_TILEGX_IMM16_X0_HW2_LAST:
			if (!validate_hw2_last(value, me))
				return -ENOEXEC;
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X0_HW1:
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X0_HW0:
			MUNGE(create_Imm16_X0);
			break;
		case R_TILEGX_IMM16_X1_HW2_LAST:
			if (!validate_hw2_last(value, me))
				return -ENOEXEC;
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X1_HW1:
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X1_HW0:
			MUNGE(create_Imm16_X1);
			break;
		case R_TILEGX_JUMPOFF_X1:
			value -= (unsigned long) location;  /* pc-relative */
			value = (long) value >> 3;     /* count by instrs */
			if (!validate_jumpoff(value)) {
				pr_warning("module %s: Out of range jump to"
					   " %#llx at %#llx (%p)\n", me->name,
					   sym->st_value + rel[i].r_addend,
					   rel[i].r_offset, location);
				return -ENOEXEC;
			}
			MUNGE(create_JumpOff_X1);
			break;
#endif

#undef MUNGE

		default:
			pr_err("module %s: Unknown relocation: %d\n",
			       me->name, (int) ELF_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
246 | |||
/* Post-relocation hook; nothing extra is needed on tile today. */
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	/* FIXME: perhaps remove the "writable" bit from the TLB? */
	return 0;
}
254 | |||
/* Arch hook at module unload; no per-module arch state to tear down. */
void module_arch_cleanup(struct module *mod)
{
}
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c new file mode 100644 index 000000000000..5ad5e13b0fa6 --- /dev/null +++ b/arch/tile/kernel/pci-dma.c | |||
@@ -0,0 +1,251 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/vmalloc.h> | ||
18 | #include <asm/tlbflush.h> | ||
19 | #include <asm/homecache.h> | ||
20 | |||
21 | /* Generic DMA mapping functions: */ | ||
22 | |||
23 | /* | ||
24 | * Allocate what Linux calls "coherent" memory, which for us just | ||
25 | * means uncached. | ||
26 | */ | ||
27 | void *dma_alloc_coherent(struct device *dev, | ||
28 | size_t size, | ||
29 | dma_addr_t *dma_handle, | ||
30 | gfp_t gfp) | ||
31 | { | ||
32 | u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32); | ||
33 | int node = dev_to_node(dev); | ||
34 | int order = get_order(size); | ||
35 | struct page *pg; | ||
36 | dma_addr_t addr; | ||
37 | |||
38 | gfp |= __GFP_ZERO; | ||
39 | |||
40 | /* | ||
41 | * By forcing NUMA node 0 for 32-bit masks we ensure that the | ||
42 | * high 32 bits of the resulting PA will be zero. If the mask | ||
43 | * size is, e.g., 24, we may still not be able to guarantee a | ||
44 | * suitable memory address, in which case we will return NULL. | ||
45 | * But such devices are uncommon. | ||
46 | */ | ||
47 | if (dma_mask <= DMA_BIT_MASK(32)) | ||
48 | node = 0; | ||
49 | |||
50 | pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED); | ||
51 | if (pg == NULL) | ||
52 | return NULL; | ||
53 | |||
54 | addr = page_to_phys(pg); | ||
55 | if (addr + size > dma_mask) { | ||
56 | homecache_free_pages(addr, order); | ||
57 | return NULL; | ||
58 | } | ||
59 | |||
60 | *dma_handle = addr; | ||
61 | return page_address(pg); | ||
62 | } | ||
63 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
64 | |||
65 | /* | ||
66 | * Free memory that was allocated with dma_alloc_coherent. | ||
67 | */ | ||
/*
 * Free memory that was allocated with dma_alloc_coherent; "vaddr" is
 * the kernel virtual address that dma_alloc_coherent returned.
 */
void dma_free_coherent(struct device *dev, size_t size,
		  void *vaddr, dma_addr_t dma_handle)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
74 | |||
75 | /* | ||
76 | * The map routines "map" the specified address range for DMA | ||
77 | * accesses. The memory belongs to the device after this call is | ||
78 | * issued, until it is unmapped with dma_unmap_single. | ||
79 | * | ||
80 | * We don't need to do any mapping, we just flush the address range | ||
81 | * out of the cache and return a DMA address. | ||
82 | * | ||
83 | * The unmap routines do whatever is necessary before the processor | ||
84 | * accesses the memory again, and must be called before the driver | ||
85 | * touches the memory. We can get away with a cache invalidate if we | ||
86 | * can count on nothing having been touched. | ||
87 | */ | ||
88 | |||
89 | |||
90 | /* | ||
91 | * dma_map_single can be passed any memory address, and there appear | ||
92 | * to be no alignment constraints. | ||
93 | * | ||
94 | * There is a chance that the start of the buffer will share a cache | ||
95 | * line with some other data that has been touched in the meantime. | ||
96 | */ | ||
/*
 * "Map" a kernel buffer for DMA: flush every page it spans out of the
 * cache and return its physical address.  There is no IOMMU, so the
 * DMA address is simply __pa(ptr).
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	struct page *page;
	dma_addr_t dma_addr;
	int thispage;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);

	/* Compute the result now; the loop below advances ptr. */
	dma_addr = __pa(ptr);

	/* We might have been handed a buffer that wraps a page boundary */
	while ((int)size > 0) {
		/* The amount to flush that's on this page */
		thispage = PAGE_SIZE - ((unsigned long)ptr & (PAGE_SIZE - 1));
		thispage = min((int)thispage, (int)size);
		/* Is this valid for any page we could be handed? */
		page = pfn_to_page(kaddr_to_pfn(ptr));
		homecache_flush_cache(page, 0);
		ptr += thispage;
		size -= thispage;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_single);
124 | |||
/* No unmap work is needed; only validate the direction argument. */
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_single);
131 | |||
132 | int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||
133 | enum dma_data_direction direction) | ||
134 | { | ||
135 | struct scatterlist *sg; | ||
136 | int i; | ||
137 | |||
138 | BUG_ON(!valid_dma_direction(direction)); | ||
139 | |||
140 | WARN_ON(nents == 0 || sglist->length == 0); | ||
141 | |||
142 | for_each_sg(sglist, sg, nents, i) { | ||
143 | struct page *page; | ||
144 | sg->dma_address = sg_phys(sg); | ||
145 | page = pfn_to_page(sg->dma_address >> PAGE_SHIFT); | ||
146 | homecache_flush_cache(page, 0); | ||
147 | } | ||
148 | |||
149 | return nents; | ||
150 | } | ||
151 | EXPORT_SYMBOL(dma_map_sg); | ||
152 | |||
/* No scatterlist unmap work is needed; only validate the direction. */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	  enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_sg);
159 | |||
/*
 * Map a single page for DMA: flush it out of the cache and return the
 * physical address of (page + offset).
 */
dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	homecache_flush_cache(page, 0);

	return page_to_pa(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);
171 | |||
/* No page-unmap work is needed; only validate the direction. */
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	   enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_page);
178 | |||
/* CPU-direction sync is a no-op here; only validate the direction. */
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);
185 | |||
186 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
187 | size_t size, enum dma_data_direction direction) | ||
188 | { | ||
189 | unsigned long start = PFN_DOWN(dma_handle); | ||
190 | unsigned long end = PFN_DOWN(dma_handle + size - 1); | ||
191 | unsigned long i; | ||
192 | |||
193 | BUG_ON(!valid_dma_direction(direction)); | ||
194 | for (i = start; i <= end; ++i) | ||
195 | homecache_flush_cache(pfn_to_page(i), 0); | ||
196 | } | ||
197 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
198 | |||
/* CPU-direction scatterlist sync is a no-op; sanity-check args only. */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sg[0].length == 0);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
206 | |||
207 | /* | ||
208 | * Flush and invalidate cache for scatterlist. | ||
209 | */ | ||
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
		    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	/* Delegate each entry to the single-buffer device sync. */
	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
225 | |||
/* Sub-range CPU sync: just offset into the single-buffer variant. */
void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
233 | |||
/* Sub-range device sync: just offset into the single-buffer variant. */
void dma_sync_single_range_for_device(struct device *dev,
				      dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);
242 | |||
243 | /* | ||
244 | * dma_alloc_noncoherent() returns non-cacheable memory, so there's no | ||
245 | * need to do any flushing here. | ||
246 | */ | ||
/*
 * NOTE(review): the generic prototype takes a leading struct device *
 * argument — confirm this signature matches the declaration in this
 * arch's <asm/dma-mapping.h>.
 */
void dma_cache_sync(void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_cache_sync);
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c new file mode 100644 index 000000000000..92ef925d2f8d --- /dev/null +++ b/arch/tile/kernel/proc.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/smp.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include <linux/threads.h> | ||
18 | #include <linux/cpumask.h> | ||
19 | #include <linux/timex.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/proc_fs.h> | ||
23 | #include <linux/sysctl.h> | ||
24 | #include <linux/hardirq.h> | ||
25 | #include <linux/mman.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/sections.h> | ||
30 | #include <asm/homecache.h> | ||
31 | #include <arch/chip.h> | ||
32 | |||
33 | |||
34 | /* | ||
35 | * Support /proc/cpuinfo | ||
36 | */ | ||
37 | |||
/*
 * Map a cpu number to/from a seq_file iterator cookie.  The +1 bias
 * keeps cpu 0 from mapping to a NULL cookie, which would otherwise
 * look like the seq_file end-of-sequence marker (see c_start()).
 */
#define cpu_to_ptr(n) ((void *)((long)(n)+1))
#define ptr_to_cpu(p) ((long)(p) - 1)
40 | |||
41 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
42 | { | ||
43 | int n = ptr_to_cpu(v); | ||
44 | |||
45 | if (n == 0) { | ||
46 | char buf[NR_CPUS*5]; | ||
47 | cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask); | ||
48 | seq_printf(m, "cpu count\t: %d\n", num_online_cpus()); | ||
49 | seq_printf(m, "cpu list\t: %s\n", buf); | ||
50 | seq_printf(m, "model name\t: %s\n", chip_model); | ||
51 | seq_printf(m, "flags\t\t:\n"); /* nothing for now */ | ||
52 | seq_printf(m, "cpu MHz\t\t: %llu.%06llu\n", | ||
53 | get_clock_rate() / 1000000, | ||
54 | (get_clock_rate() % 1000000)); | ||
55 | seq_printf(m, "bogomips\t: %lu.%02lu\n\n", | ||
56 | loops_per_jiffy/(500000/HZ), | ||
57 | (loops_per_jiffy/(5000/HZ)) % 100); | ||
58 | } | ||
59 | |||
60 | #ifdef CONFIG_SMP | ||
61 | if (!cpu_online(n)) | ||
62 | return 0; | ||
63 | #endif | ||
64 | |||
65 | seq_printf(m, "processor\t: %d\n", n); | ||
66 | |||
67 | /* Print only num_online_cpus() blank lines total. */ | ||
68 | if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids) | ||
69 | seq_printf(m, "\n"); | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
75 | { | ||
76 | return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL; | ||
77 | } | ||
78 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
79 | { | ||
80 | ++*pos; | ||
81 | return c_start(m, pos); | ||
82 | } | ||
/* seq_file iterator: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}
/* Iterator ops consumed by the generic /proc/cpuinfo code. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c new file mode 100644 index 000000000000..ed590ad0acdc --- /dev/null +++ b/arch/tile/kernel/process.c | |||
@@ -0,0 +1,671 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/preempt.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/kprobes.h> | ||
20 | #include <linux/elfcore.h> | ||
21 | #include <linux/tick.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/compat.h> | ||
25 | #include <linux/hardirq.h> | ||
26 | #include <linux/syscalls.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <asm/system.h> | ||
29 | #include <asm/stack.h> | ||
30 | #include <asm/homecache.h> | ||
31 | #include <asm/syscalls.h> | ||
32 | #ifdef CONFIG_HARDWALL | ||
33 | #include <asm/hardwall.h> | ||
34 | #endif | ||
35 | #include <arch/chip.h> | ||
36 | #include <arch/abi.h> | ||
37 | |||
38 | |||
39 | /* | ||
40 | * Use the (x86) "idle=poll" option to prefer low latency when leaving the | ||
41 | * idle loop over low power while in the idle loop, e.g. if we have | ||
42 | * one thread per core and we want to get threads out of futex waits fast. | ||
43 | */ | ||
44 | static int no_idle_nap; | ||
45 | static int __init idle_setup(char *str) | ||
46 | { | ||
47 | if (!str) | ||
48 | return -EINVAL; | ||
49 | |||
50 | if (!strcmp(str, "poll")) { | ||
51 | pr_info("using polling idle threads.\n"); | ||
52 | no_idle_nap = 1; | ||
53 | } else if (!strcmp(str, "halt")) | ||
54 | no_idle_nap = 0; | ||
55 | else | ||
56 | return -1; | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | early_param("idle", idle_setup); | ||
61 | |||
62 | /* | ||
63 | * The idle thread. There's no useful work to be | ||
64 | * done, so just try to conserve power and have a | ||
65 | * low exit latency (ie sit in a loop waiting for | ||
66 | * somebody to say that they'd like to reschedule) | ||
67 | */ | ||
68 | void cpu_idle(void) | ||
69 | { | ||
70 | int cpu = smp_processor_id(); | ||
71 | |||
72 | |||
73 | current_thread_info()->status |= TS_POLLING; | ||
74 | |||
75 | if (no_idle_nap) { | ||
76 | while (1) { | ||
77 | while (!need_resched()) | ||
78 | cpu_relax(); | ||
79 | schedule(); | ||
80 | } | ||
81 | } | ||
82 | |||
83 | /* endless idle loop with no priority at all */ | ||
84 | while (1) { | ||
85 | tick_nohz_stop_sched_tick(1); | ||
86 | while (!need_resched()) { | ||
87 | if (cpu_is_offline(cpu)) | ||
88 | BUG(); /* no HOTPLUG_CPU */ | ||
89 | |||
90 | local_irq_disable(); | ||
91 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; | ||
92 | current_thread_info()->status &= ~TS_POLLING; | ||
93 | /* | ||
94 | * TS_POLLING-cleared state must be visible before we | ||
95 | * test NEED_RESCHED: | ||
96 | */ | ||
97 | smp_mb(); | ||
98 | |||
99 | if (!need_resched()) | ||
100 | _cpu_idle(); | ||
101 | else | ||
102 | local_irq_enable(); | ||
103 | current_thread_info()->status |= TS_POLLING; | ||
104 | } | ||
105 | tick_nohz_restart_sched_tick(); | ||
106 | preempt_enable_no_resched(); | ||
107 | schedule(); | ||
108 | preempt_disable(); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | struct thread_info *alloc_thread_info(struct task_struct *task) | ||
113 | { | ||
114 | struct page *page; | ||
115 | gfp_t flags = GFP_KERNEL; | ||
116 | |||
117 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
118 | flags |= __GFP_ZERO; | ||
119 | #endif | ||
120 | |||
121 | page = alloc_pages(flags, THREAD_SIZE_ORDER); | ||
122 | if (!page) | ||
123 | return NULL; | ||
124 | |||
125 | return (struct thread_info *)page_address(page); | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Free a thread_info node, and all of its derivative | ||
130 | * data structures. | ||
131 | */ | ||
132 | void free_thread_info(struct thread_info *info) | ||
133 | { | ||
134 | struct single_step_state *step_state = info->step_state; | ||
135 | |||
136 | #ifdef CONFIG_HARDWALL | ||
137 | /* | ||
138 | * We free a thread_info from the context of the task that has | ||
139 | * been scheduled next, so the original task is already dead. | ||
140 | * Calling deactivate here just frees up the data structures. | ||
141 | * If the task we're freeing held the last reference to a | ||
142 | * hardwall fd, it would have been released prior to this point | ||
143 | * anyway via exit_files(), and "hardwall" would be NULL by now. | ||
144 | */ | ||
145 | if (info->task->thread.hardwall) | ||
146 | hardwall_deactivate(info->task); | ||
147 | #endif | ||
148 | |||
149 | if (step_state) { | ||
150 | |||
151 | /* | ||
152 | * FIXME: we don't munmap step_state->buffer | ||
153 | * because the mm_struct for this process (info->task->mm) | ||
154 | * has already been zeroed in exit_mm(). Keeping a | ||
155 | * reference to it here seems like a bad move, so this | ||
156 | * means we can't munmap() the buffer, and therefore if we | ||
157 | * ptrace multiple threads in a process, we will slowly | ||
158 | * leak user memory. (Note that as soon as the last | ||
159 | * thread in a process dies, we will reclaim all user | ||
160 | * memory including single-step buffers in the usual way.) | ||
161 | * We should either assign a kernel VA to this buffer | ||
162 | * somehow, or we should associate the buffer(s) with the | ||
163 | * mm itself so we can clean them up that way. | ||
164 | */ | ||
165 | kfree(step_state); | ||
166 | } | ||
167 | |||
168 | free_page((unsigned long)info); | ||
169 | } | ||
170 | |||
171 | static void save_arch_state(struct thread_struct *t); | ||
172 | |||
/*
 * Set up the kernel stack and saved-register state of a freshly
 * forked task @p so that the scheduler can switch to it and it will
 * "return from interrupt" into user (or kernel-thread) context.
 *
 * @sp is the child's user stack pointer (zero for kernel threads,
 * fixed up below), @regs is the parent's register state to clone.
 * Returns 0; has no failure paths.
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_size,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	unsigned long ksp;

	/*
	 * When creating a new kernel thread we pass sp as zero.
	 * Assign it to a reasonable value now that we have the stack.
	 */
	if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0))
		sp = KSTK_TOP(p);

	/*
	 * Do not clone step state from the parent; each thread
	 * must make its own lazily.
	 */
	task_thread_info(p)->step_state = NULL;

	/*
	 * Start new thread in ret_from_fork so it schedules properly
	 * and then return from interrupt like the parent.
	 */
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Save user stack top pointer so we can ID the stack vm area later. */
	p->thread.usp0 = sp;

	/* Record the pid of the process that created this one. */
	p->thread.creator_pid = current->pid;

	/*
	 * Copy the registers onto the kernel stack so the
	 * return-from-interrupt code will reload it into registers.
	 */
	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->regs[0] = 0;         /* return value is zero */
	childregs->sp = sp;  /* override with new user stack pointer */

	/*
	 * Copy the callee-saved registers from the passed pt_regs struct
	 * into the context-switch callee-saved registers area.
	 * We have to restore the callee-saved registers since we may
	 * be cloning a userspace task with userspace register state,
	 * and we won't be unwinding the same kernel frames to restore them.
	 * Zero out the C ABI save area to mark the top of the stack.
	 */
	ksp = (unsigned long) childregs;
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* interrupt-entry save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long);
	memcpy((void *)ksp, &regs->regs[CALLEE_SAVED_FIRST_REG],
	       CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long));
	ksp -= C_ABI_SAVE_AREA_SIZE;   /* __switch_to() save area */
	((long *)ksp)[0] = ((long *)ksp)[1] = 0;
	p->thread.ksp = ksp;

#if CHIP_HAS_TILE_DMA()
	/*
	 * No DMA in the new thread.  We model this on the fact that
	 * fork() clears the pending signals, alarms, and aio for the child.
	 */
	memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state));
	memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_SN_PROC()
	/* Likewise, the new thread is not running static processor code. */
	p->thread.sn_proc_running = 0;
	memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
#endif

#if CHIP_HAS_PROC_STATUS_SPR()
	/* New thread has its miscellaneous processor state bits clear. */
	p->thread.proc_status = 0;
#endif

#ifdef CONFIG_HARDWALL
	/* New thread does not own any networks. */
	p->thread.hardwall = NULL;
#endif


	/*
	 * Start the new thread with the current architecture state
	 * (user interrupt masks, etc.).
	 */
	save_arch_state(&p->thread);

	return 0;
}
266 | |||
267 | /* | ||
268 | * Return "current" if it looks plausible, or else a pointer to a dummy. | ||
269 | * This can be helpful if we are just trying to emit a clean panic. | ||
270 | */ | ||
271 | struct task_struct *validate_current(void) | ||
272 | { | ||
273 | static struct task_struct corrupt = { .comm = "<corrupt>" }; | ||
274 | struct task_struct *tsk = current; | ||
275 | if (unlikely((unsigned long)tsk < PAGE_OFFSET || | ||
276 | (void *)tsk > high_memory || | ||
277 | ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) { | ||
278 | pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer); | ||
279 | tsk = &corrupt; | ||
280 | } | ||
281 | return tsk; | ||
282 | } | ||
283 | |||
/* Take and return the pointer to the previous task, for schedule_tail(). */
struct task_struct *sim_notify_fork(struct task_struct *prev)
{
	struct task_struct *tsk = current;
	/* Report the fork to the simulator: creator's pid first, then ours. */
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT |
		     (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS));
	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK |
		     (tsk->pid << _SIM_CONTROL_OPERATOR_BITS));
	return prev;
}
294 | |||
295 | int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) | ||
296 | { | ||
297 | struct pt_regs *ptregs = task_pt_regs(tsk); | ||
298 | elf_core_copy_regs(regs, ptregs); | ||
299 | return 1; | ||
300 | } | ||
301 | |||
302 | #if CHIP_HAS_TILE_DMA() | ||
303 | |||
/* Allow user processes to access the DMA SPRs */
void grant_dma_mpls(void)
{
	/*
	 * NOTE(review): the _SET_0 SPRs appear to route the DMA
	 * completion/notify MPLs to PL0 (user); confirm against the
	 * chip's MPL documentation.
	 */
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1);
}
310 | |||
/* Forbid user processes from accessing the DMA SPRs */
void restrict_dma_mpls(void)
{
	/* _SET_1: route the DMA MPLs back to the kernel's protection level. */
	__insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1);
	__insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1);
}
317 | |||
/*
 * Pause the DMA engine, then save off its state registers.
 * Counterpart of restore_tile_dma_state(); called at context-switch
 * time via _prepare_arch_switch() when DMA is enabled.
 */
static void save_tile_dma_state(struct tile_dma_state *dma)
{
	unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS);
	unsigned long post_suspend_state;

	/* If we're running, suspend the engine. */
	if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK)
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK);

	/*
	 * Wait for the engine to idle, then save regs.  Note that we
	 * want to record the "running" bit from before suspension,
	 * and the "done" bit from after, so that we can properly
	 * distinguish a case where the user suspended the engine from
	 * the case where the kernel suspended as part of the context
	 * swap.
	 */
	do {
		post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS);
	} while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK);

	dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR);
	dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR);
	dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR);
	dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR);
	dma->strides = __insn_mfspr(SPR_DMA_STRIDE);
	dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE);
	dma->byte = __insn_mfspr(SPR_DMA_BYTE);
	/* Merge pre-suspend "running" with post-suspend "done" (see above). */
	dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) |
		(post_suspend_state & SPR_DMA_STATUS__DONE_MASK);
}
350 | |||
/*
 * Restart a DMA that was running before we were context-switched out.
 * Counterpart of save_tile_dma_state(); called from _switch_to() with
 * interrupts disabled (see the comment at that call site).
 */
static void restore_tile_dma_state(struct thread_struct *t)
{
	const struct tile_dma_state *dma = &t->tile_dma_state;

	/*
	 * The only way to restore the done bit is to run a zero
	 * length transaction.
	 */
	if ((dma->status & SPR_DMA_STATUS__DONE_MASK) &&
	    !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) {
		__insn_mtspr(SPR_DMA_BYTE, 0);
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		/* Spin until the zero-length transaction has drained. */
		while (__insn_mfspr(SPR_DMA_USER_STATUS) &
		       SPR_DMA_STATUS__BUSY_MASK)
			;
	}

	__insn_mtspr(SPR_DMA_SRC_ADDR, dma->src);
	__insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk);
	__insn_mtspr(SPR_DMA_DST_ADDR, dma->dest);
	__insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk);
	__insn_mtspr(SPR_DMA_STRIDE, dma->strides);
	__insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size);
	__insn_mtspr(SPR_DMA_BYTE, dma->byte);

	/*
	 * Restart the engine if we were running and not done.
	 * Clear a pending async DMA fault that we were waiting on return
	 * to user space to execute, since we expect the DMA engine
	 * to regenerate those faults for us now.  Note that we don't
	 * try to clear the TIF_ASYNC_TLB flag, since it's relatively
	 * harmless if set, and it covers both DMA and the SN processor.
	 */
	if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) {
		t->dma_async_tlb.fault_num = 0;
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
	}
}
390 | |||
391 | #endif | ||
392 | |||
/*
 * Capture the outgoing task's per-task SPR state (interrupt mask,
 * exception context, system save registers, INTCTRL_0, and optional
 * PROC_STATUS) into @t.  Mirrored exactly by restore_arch_state().
 */
static void save_arch_state(struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	/* The 64-bit interrupt mask lives in two 32-bit SPR halves. */
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) |
		((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32);
#else
	t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0);
#endif
	t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0);
	t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1);
	t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0);
	t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1);
	t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
	t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
	t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
#if CHIP_HAS_PROC_STATUS_SPR()
	t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
#endif
}
412 | |||
/*
 * Reinstall the per-task SPR state previously captured by
 * save_arch_state() for the incoming task @t.
 */
static void restore_arch_state(const struct thread_struct *t)
{
#if CHIP_HAS_SPLIT_INTR_MASK()
	/* Split the 64-bit mask back into its two 32-bit SPR halves. */
	__insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask);
	__insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32);
#else
	__insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask);
#endif
	__insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]);
	__insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
	__insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
	__insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
#if CHIP_HAS_PROC_STATUS_SPR()
	__insn_mtspr(SPR_PROC_STATUS, t->proc_status);
#endif
#if CHIP_HAS_TILE_RTF_HWM()
	/*
	 * Clear this whenever we switch back to a process in case
	 * the previous process was monkeying with it.  Even if enabled
	 * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a
	 * performance hint, so isn't worth a full save/restore.
	 */
	__insn_mtspr(SPR_TILE_RTF_HWM, 0);
#endif
}
441 | |||
442 | |||
/*
 * First half of a context switch: quiesce the outgoing task's DMA
 * engine (saving its state) and freeze its static network processor.
 * The remaining state save/restore happens in _switch_to().
 */
void _prepare_arch_switch(struct task_struct *next)
{
#if CHIP_HAS_SN_PROC()
	int snctl;
#endif
#if CHIP_HAS_TILE_DMA()
	struct tile_dma_state *dma = &current->thread.tile_dma_state;
	if (dma->enabled)
		save_tile_dma_state(dma);
#endif
#if CHIP_HAS_SN_PROC()
	/*
	 * Suspend the static network processor if it was running.
	 * We do not suspend the fabric itself, just like we don't
	 * try to suspend the UDN.
	 */
	snctl = __insn_mfspr(SPR_SNCTL);
	current->thread.sn_proc_running =
		(snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
	if (current->thread.sn_proc_running)
		__insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
#endif
}
466 | |||
467 | |||
/*
 * Second half of a context switch: save the rest of prev's arch
 * state, install next's DMA/SPR/network state, then swap kernel
 * stacks via __switch_to().  Per the DMA-restart comment below,
 * this runs with interrupts disabled.
 */
struct task_struct *__sched _switch_to(struct task_struct *prev,
				       struct task_struct *next)
{
	/* DMA state is already saved; save off other arch state. */
	save_arch_state(&prev->thread);

#if CHIP_HAS_TILE_DMA()
	/*
	 * Restore DMA in new task if desired.
	 * Note that it is only safe to restart here since interrupts
	 * are disabled, so we can't take any DMATLB miss or access
	 * interrupts before we have finished switching stacks.
	 */
	if (next->thread.tile_dma_state.enabled) {
		restore_tile_dma_state(&next->thread);
		grant_dma_mpls();
	} else {
		restrict_dma_mpls();
	}
#endif

	/* Restore other arch state. */
	restore_arch_state(&next->thread);

#if CHIP_HAS_SN_PROC()
	/*
	 * Restart static network processor in the new process
	 * if it was running before.
	 */
	if (next->thread.sn_proc_running) {
		int snctl = __insn_mfspr(SPR_SNCTL);
		__insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
	}
#endif

#ifdef CONFIG_HARDWALL
	/* Enable or disable access to the network registers appropriately. */
	if (prev->thread.hardwall != NULL) {
		if (next->thread.hardwall == NULL)
			restrict_network_mpls();
	} else if (next->thread.hardwall != NULL) {
		grant_network_mpls();
	}
#endif

	/*
	 * Switch kernel SP, PC, and callee-saved registers.
	 * In the context of the new task, return the old task pointer
	 * (i.e. the task that actually called __switch_to).
	 * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp.
	 */
	return __switch_to(prev, next, next_current_ksp0(next));
}
521 | |||
/* Syscall entry for fork(): full copy, child starts on the same sp. */
long _sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}
526 | |||
527 | long _sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
528 | void __user *parent_tidptr, void __user *child_tidptr, | ||
529 | struct pt_regs *regs) | ||
530 | { | ||
531 | if (!newsp) | ||
532 | newsp = regs->sp; | ||
533 | return do_fork(clone_flags, newsp, regs, 0, | ||
534 | parent_tidptr, child_tidptr); | ||
535 | } | ||
536 | |||
/*
 * Syscall entry for vfork(): child shares the VM and the parent is
 * suspended until the child execs or exits (CLONE_VFORK | CLONE_VM).
 */
long _sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp,
		       regs, 0, NULL, NULL);
}
542 | |||
543 | /* | ||
544 | * sys_execve() executes a new program. | ||
545 | */ | ||
546 | long _sys_execve(char __user *path, char __user *__user *argv, | ||
547 | char __user *__user *envp, struct pt_regs *regs) | ||
548 | { | ||
549 | long error; | ||
550 | char *filename; | ||
551 | |||
552 | filename = getname(path); | ||
553 | error = PTR_ERR(filename); | ||
554 | if (IS_ERR(filename)) | ||
555 | goto out; | ||
556 | error = do_execve(filename, argv, envp, regs); | ||
557 | putname(filename); | ||
558 | out: | ||
559 | return error; | ||
560 | } | ||
561 | |||
#ifdef CONFIG_COMPAT
/*
 * Compat (32-bit pointer table) flavor of execve; same shape as
 * _sys_execve() but delegates to compat_do_execve().
 */
long _compat_sys_execve(char __user *path, compat_uptr_t __user *argv,
			compat_uptr_t __user *envp, struct pt_regs *regs)
{
	char *filename = getname(path);
	long error = PTR_ERR(filename);

	if (!IS_ERR(filename)) {
		error = compat_do_execve(filename, argv, envp, regs);
		putname(filename);
	}
	return error;
}
#endif
579 | |||
/*
 * Return the kernel PC where @p is sleeping -- the first backtrace
 * frame outside the scheduler -- or 0 if @p is running, is the
 * current task, or no such frame is found.
 */
unsigned long get_wchan(struct task_struct *p)
{
	struct KBacktraceIterator kbt;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	for (KBacktraceIterator_init(&kbt, p, NULL);
	     !KBacktraceIterator_end(&kbt);
	     KBacktraceIterator_next(&kbt)) {
		if (!in_sched_functions(kbt.it.pc))
			return kbt.it.pc;
	}

	return 0;
}
596 | |||
597 | /* | ||
598 | * We pass in lr as zero (cleared in kernel_thread) and the caller | ||
599 | * part of the backtrace ABI on the stack also zeroed (in copy_thread) | ||
600 | * so that backtraces will stop with this function. | ||
601 | * Note that we don't use r0, since copy_thread() clears it. | ||
602 | */ | ||
603 | static void start_kernel_thread(int dummy, int (*fn)(int), int arg) | ||
604 | { | ||
605 | do_exit(fn(arg)); | ||
606 | } | ||
607 | |||
608 | /* | ||
609 | * Create a kernel thread | ||
610 | */ | ||
611 | int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | ||
612 | { | ||
613 | struct pt_regs regs; | ||
614 | |||
615 | memset(®s, 0, sizeof(regs)); | ||
616 | regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0); /* run at kernel PL, no ICS */ | ||
617 | regs.pc = (long) start_kernel_thread; | ||
618 | regs.flags = PT_FLAGS_CALLER_SAVES; /* need to restore r1 and r2 */ | ||
619 | regs.regs[1] = (long) fn; /* function pointer */ | ||
620 | regs.regs[2] = (long) arg; /* parameter register */ | ||
621 | |||
622 | /* Ok, create the new process.. */ | ||
623 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, | ||
624 | 0, NULL, NULL); | ||
625 | } | ||
626 | EXPORT_SYMBOL(kernel_thread); | ||
627 | |||
/* Flush thread state.  No per-thread arch state needs resetting on exec. */
void flush_thread(void)
{
	/* Nothing */
}
633 | |||
634 | /* | ||
635 | * Free current thread data structures etc.. | ||
636 | */ | ||
637 | void exit_thread(void) | ||
638 | { | ||
639 | /* Nothing */ | ||
640 | } | ||
641 | |||
642 | void show_regs(struct pt_regs *regs) | ||
643 | { | ||
644 | struct task_struct *tsk = validate_current(); | ||
645 | int i; | ||
646 | |||
647 | pr_err("\n"); | ||
648 | pr_err(" Pid: %d, comm: %20s, CPU: %d\n", | ||
649 | tsk->pid, tsk->comm, smp_processor_id()); | ||
650 | #ifdef __tilegx__ | ||
651 | for (i = 0; i < 51; i += 3) | ||
652 | pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n", | ||
653 | i, regs->regs[i], i+1, regs->regs[i+1], | ||
654 | i+2, regs->regs[i+2]); | ||
655 | pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n", | ||
656 | regs->regs[51], regs->regs[52], regs->tp); | ||
657 | pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); | ||
658 | #else | ||
659 | for (i = 0; i < 52; i += 3) | ||
660 | pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT | ||
661 | " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", | ||
662 | i, regs->regs[i], i+1, regs->regs[i+1], | ||
663 | i+2, regs->regs[i+2], i+3, regs->regs[i+3]); | ||
664 | pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n", | ||
665 | regs->regs[52], regs->tp, regs->sp, regs->lr); | ||
666 | #endif | ||
667 | pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n", | ||
668 | regs->pc, regs->ex1, regs->faultnum); | ||
669 | |||
670 | dump_stack_regs(regs); | ||
671 | } | ||
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c new file mode 100644 index 000000000000..7161bd03d2fd --- /dev/null +++ b/arch/tile/kernel/ptrace.c | |||
@@ -0,0 +1,205 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Copied from i386: Ross Biro 1/23/92 | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <linux/kprobes.h> | ||
20 | #include <linux/compat.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <asm/traps.h> | ||
23 | |||
/* Arm single-step for "child": checked on the return-to-user path. */
void user_enable_single_step(struct task_struct *child)
{
	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
28 | |||
/* Disarm single-step for "child". */
void user_disable_single_step(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
33 | |||
/*
 * Write one word into the child's saved pt_regs area at byte offset
 * "addr".  No bounds check here: callers (arch_ptrace) validate addr
 * against PTREGS_SIZE first.  The PT_FLAGS_RESTORE_REGS flag requests
 * that the full register set be restored on return to user space —
 * presumably so a caller-saved register change actually takes effect;
 * confirm against the return-path assembly.
 */
static void putreg(struct task_struct *task,
		   unsigned long addr, unsigned long value)
{
	unsigned int regno = addr / sizeof(unsigned long);
	struct pt_regs *childregs = task_pt_regs(task);
	childregs->regs[regno] = value;
	childregs->flags |= PT_FLAGS_RESTORE_REGS;
}
45 | |||
/*
 * Read one word from the child's saved pt_regs area at byte offset
 * "addr".  Callers validate addr against PTREGS_SIZE.
 */
static unsigned long getreg(struct task_struct *task, unsigned long addr)
{
	unsigned int regno = addr / sizeof(unsigned long);
	struct pt_regs *childregs = task_pt_regs(task);
	return childregs->regs[regno];
}
52 | |||
/*
 * Called by kernel/ptrace.c when detaching: clear any arch-specific
 * per-tracee state (single-step and syscall-trace flags).
 */
void ptrace_disable(struct task_struct *child)
{
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/*
	 * These two are currently unused, but will be set by arch_ptrace()
	 * and used in the syscall assembly when we do support them.
	 */
	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
66 | |||
/*
 * Architecture-specific ptrace request dispatch.
 *
 * Handles register peek/poke and bulk get/set against the child's
 * saved pt_regs, a TILE-specific PTRACE_SETOPTIONS extension, and
 * falls back to the generic (or compat-generic) handler otherwise.
 * Returns 0 (or a put_user/get_user result) on success, -EIO for
 * malformed or unsupported requests.
 */
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	unsigned long __user *datap;
	unsigned long tmp;
	int i;
	long ret = -EIO;	/* default for any request we reject */

#ifdef CONFIG_COMPAT
	/* Truncate addr/data to 32 bits when tracer or tracee is compat. */
	if (task_thread_info(current)->status & TS_COMPAT)
		data = (u32)data;
	if (task_thread_info(child)->status & TS_COMPAT)
		addr = (u32)addr;
#endif
	datap = (unsigned long __user __force *)data;

	switch (request) {

	case PTRACE_PEEKUSR:  /* Read register from pt_regs. */
		if (addr & (sizeof(data)-1))
			break;	/* reject unaligned offsets */
		if (addr < 0 || addr >= PTREGS_SIZE)
			break;	/* reject out-of-range offsets */
		tmp = getreg(child, addr);   /* Read register */
		ret = put_user(tmp, datap);
		break;

	case PTRACE_POKEUSR:  /* Write register in pt_regs. */
		if (addr & (sizeof(data)-1))
			break;
		if (addr < 0 || addr >= PTREGS_SIZE)
			break;
		putreg(child, addr, data);   /* Write register */
		ret = 0;
		break;

	case PTRACE_GETREGS:  /* Get all registers from the child. */
		if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
			break;
		for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
			ret = __put_user(getreg(child, i), datap);
			if (ret != 0)
				break;
			datap++;
		}
		break;

	case PTRACE_SETREGS:  /* Set all registers in the child. */
		if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
			break;
		for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) {
			ret = __get_user(tmp, datap);
			if (ret != 0)
				break;
			putreg(child, i, tmp);
			datap++;
		}
		break;

	case PTRACE_GETFPREGS:  /* Get the child FPU state. */
	case PTRACE_SETFPREGS:  /* Set the child FPU state. */
		break;		/* no FPU state on tile: -EIO */

	case PTRACE_SETOPTIONS:
		/* Support TILE-specific ptrace options. */
		child->ptrace &= ~PT_TRACE_MASK_TILE;
		tmp = data & PTRACE_O_MASK_TILE;
		/* Strip our bits before handing the rest to the generic code. */
		data &= ~PTRACE_O_MASK_TILE;
		ret = ptrace_request(child, request, addr, data);
		if (tmp & PTRACE_O_TRACEMIGRATE)
			child->ptrace |= PT_TRACE_MIGRATE;
		break;

	default:
#ifdef CONFIG_COMPAT
		if (task_thread_info(current)->status & TS_COMPAT) {
			ret = compat_ptrace_request(child, request,
						    addr, data);
			break;
		}
#endif
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
153 | |||
#ifdef CONFIG_COMPAT
/*
 * Not used; we handle compat issues in arch_ptrace() directly, so
 * reaching this entry point indicates a kernel bug.
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t addr, compat_ulong_t data)
{
	BUG();
}
#endif
162 | |||
/*
 * Notify a ptrace'ing parent of a syscall stop.  No-op unless this
 * task has TIF_SYSCALL_TRACE set and is actually being ptraced.
 */
void do_syscall_trace(void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;

	if (!(current->ptrace & PT_PTRACED))
		return;

	/*
	 * The 0x80 provides a way for the tracing parent to distinguish
	 * between a syscall stop and SIGTRAP delivery
	 */
	ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
187 | |||
/*
 * Deliver a breakpoint SIGTRAP to "tsk", with si_addr set to the
 * faulting pc.  The memset guarantees every other siginfo field
 * (including padding) reads as zero.  "error_code" is currently
 * unused here.
 */
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
{
	struct siginfo info;

	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = TRAP_BRKPT;
	info.si_addr = (void __user *) regs->pc;

	/* Send us the fakey SIGTRAP */
	force_sig_info(SIGTRAP, &info, tsk);
}
200 | |||
/* Handle synthetic interrupt delivered only by the simulator. */
void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
{
	/* Forward straight to the SIGTRAP machinery above. */
	send_sigtrap(current, regs, fault_num);
}
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c new file mode 100644 index 000000000000..acd86d20beba --- /dev/null +++ b/arch/tile/kernel/reboot.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/stddef.h> | ||
16 | #include <linux/reboot.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/pm.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/setup.h> | ||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | #ifndef CONFIG_SMP | ||
24 | #define smp_send_stop() | ||
25 | #endif | ||
26 | |||
/*
 * Halt the machine: warn if early printk was active, quiesce
 * interrupts and the other cpus, then ask the hypervisor to halt.
 */
void machine_halt(void)
{
	warn_early_printk();
	raw_local_irq_disable_all();
	smp_send_stop();
	hv_halt();
}
34 | |||
/* Power off: same quiesce sequence as machine_halt(), then hv_power_off(). */
void machine_power_off(void)
{
	warn_early_printk();
	raw_local_irq_disable_all();
	smp_send_stop();
	hv_power_off();
}
42 | |||
/*
 * Reboot: quiesce interrupts and the other cpus, then ask the
 * hypervisor to restart the "vmlinux" image with command line "cmd".
 */
void machine_restart(char *cmd)
{
	raw_local_irq_disable_all();
	smp_send_stop();
	hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
}
49 | |||
50 | /* No interesting distinction to be made here. */ | ||
51 | void (*pm_power_off)(void) = NULL; | ||
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S new file mode 100644 index 000000000000..e88d6e122783 --- /dev/null +++ b/arch/tile/kernel/regs_32.S | |||
@@ -0,0 +1,145 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <asm/system.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/asm-offsets.h> | ||
19 | #include <arch/spr_def.h> | ||
20 | #include <asm/processor.h> | ||
21 | |||
22 | /* | ||
23 | * See <asm/system.h>; called with prev and next task_struct pointers. | ||
24 | * "prev" is returned in r0 for _switch_to and also for ret_from_fork. | ||
25 | * | ||
26 | * We want to save pc/sp in "prev", and get the new pc/sp from "next". | ||
27 | * We also need to save all the callee-saved registers on the stack. | ||
28 | * | ||
29 | * Intel enables/disables access to the hardware cycle counter in | ||
30 | * seccomp (secure computing) environments if necessary, based on | ||
31 | * has_secure_computing(). We might want to do this at some point, | ||
32 | * though it would require virtualizing the other SPRs under WORLD_ACCESS. | ||
33 | * | ||
34 | * Since we're saving to the stack, we omit sp from this list. | ||
35 | * And for parallels with other architectures, we save lr separately, | ||
36 | * in the thread_struct itself (as the "pc" field). | ||
37 | * | ||
38 | * This code also needs to be aligned with process.c copy_thread() | ||
39 | */ | ||
40 | |||
#if CALLEE_SAVED_REGS_COUNT != 24
# error Mismatch between <asm/system.h> and kernel/entry.S
#endif
/* Frame holds saved lr, old sp, and the 24 callee-saved registers (4B each). */
#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4)

/* Store/load one register at r12, post-incrementing r12 by a word. */
#define SAVE_REG(r) { sw r12, r; addi r12, r12, 4 }
#define LOAD_REG(r) { lw r, r12; addi r12, r12, 4 }
#define FOR_EACH_CALLEE_SAVED_REG(f)					\
							f(r30); f(r31); \
	f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \
	f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \
	f(r48); f(r49); f(r50); f(r51); f(r52);

STD_ENTRY_SECTION(__switch_to, .sched.text)
	{
	  /* Remember old sp in r10; save lr at the frame base. */
	  move r10, sp
	  sw sp, lr
	  addi sp, sp, -FRAME_SIZE
	}
	{
	  /* r11 -> slot for old sp; r12 -> first callee-saved slot. */
	  addi r11, sp, 4
	  addi r12, sp, 8
	}
	{
	  sw r11, r10
	  addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET
	}
	{
	  lw r13, r4 /* Load new sp to a temp register early. */
	  addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET
	}
	FOR_EACH_CALLEE_SAVED_REG(SAVE_REG)
	{
	  /* Record prev's kernel sp in prev->thread.ksp. */
	  sw r3, sp
	  addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET
	}
	{
	  /* Record prev's resume address (our lr) in prev->thread.pc. */
	  sw r3, lr
	  addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET
	}
	{
	  /* Pick up next's saved pc; point r12 at its callee-saved slots. */
	  lw lr, r4
	  addi r12, r13, 8
	}
	{
	  /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */
	  move sp, r13
	  mtspr SYSTEM_SAVE_1_0, r2
	}
	FOR_EACH_CALLEE_SAVED_REG(LOAD_REG)
.L__switch_to_pc:
	{
	  addi sp, sp, FRAME_SIZE
	  jrp lr   /* r0 is still valid here, so return it */
	}
	STD_ENDPROC(__switch_to)
97 | |||
/* Return a suitable address for the backtracer for suspended threads */
STD_ENTRY_SECTION(get_switch_to_pc, .sched.text)
	lnk r0
	{
	  /* Rebase the lnk value to the .L__switch_to_pc label. */
	  addli r0, r0, .L__switch_to_pc - .
	  jrp lr
	}
	STD_ENDPROC(get_switch_to_pc)
106 | |||
/*
 * Fill in the pt_regs buffer pointed to by r0 with a snapshot of the
 * current register state (r0..r52, tp, sp, lr, a synthesized pc/ex1),
 * zeroing faultnum and orig_r0, and return r0 restored to the buffer
 * base.
 */
STD_ENTRY(get_pt_regs)
	.irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \
		 r8, r9, r10, r11, r12, r13, r14, r15, \
		 r16, r17, r18, r19, r20, r21, r22, r23, \
		 r24, r25, r26, r27, r28, r29, r30, r31, \
		 r32, r33, r34, r35, r36, r37, r38, r39, \
		 r40, r41, r42, r43, r44, r45, r46, r47, \
		 r48, r49, r50, r51, r52, tp, sp
	{
	 sw r0, \reg
	 addi r0, r0, 4
	}
	.endr
	{
	 sw r0, lr
	 addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR
	}
	/* Use our own address (via lnk) as the snapshot's pc. */
	lnk r1
	{
	 sw r0, r1
	 addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	}
	/* Synthesize an ex1 value: current ICS bit plus the kernel PL. */
	mfspr r1, INTERRUPT_CRITICAL_SECTION
	shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT
	ori r1, r1, KERNEL_PL
	{
	 sw r0, r1
	 addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1
	}
	{
	 sw r0, zero       /* clear faultnum */
	 addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM
	}
	{
	 sw r0, zero       /* clear orig_r0 */
	 addli r0, r0, -PTREGS_OFFSET_ORIG_R0  /* restore r0 to base */
	}
	jrp lr
	STD_ENDPROC(get_pt_regs)
diff --git a/arch/tile/kernel/relocate_kernel.S b/arch/tile/kernel/relocate_kernel.S new file mode 100644 index 000000000000..010b418515f8 --- /dev/null +++ b/arch/tile/kernel/relocate_kernel.S | |||
@@ -0,0 +1,280 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * copy new kernel into place and then call hv_reexec | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/linkage.h> | ||
19 | #include <arch/chip.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <hv/hypervisor.h> | ||
22 | |||
/*
 * Direct hypervisor entry points, computed from the dispatch table
 * base, so we can call hv services while relocating over the old
 * kernel image.
 */
#define ___hvb	MEM_SV_INTRPT + HV_GLUE_START_CPA

#define ___hv_dispatch(f) (___hvb + (HV_DISPATCH_ENTRY_SIZE * f))

#define ___hv_console_putc ___hv_dispatch(HV_DISPATCH_CONSOLE_PUTC)
#define ___hv_halt         ___hv_dispatch(HV_DISPATCH_HALT)
#define ___hv_reexec       ___hv_dispatch(HV_DISPATCH_REEXEC)
#define ___hv_flush_remote ___hv_dispatch(HV_DISPATCH_FLUSH_REMOTE)

/* Define to get single-character progress output on the hv console. */
#undef RELOCATE_NEW_KERNEL_VERBOSE

/*
 * Walk the kexec page list (IND_DESTINATION / IND_INDIRECTION /
 * IND_SOURCE / IND_DONE entries), copying the new kernel into place,
 * then hand control to the hypervisor via hv_reexec.  Does not return.
 */
STD_ENTRY(relocate_new_kernel)

	move	r30, r0		/* page list */
	move	r31, r1		/* address of page we are on */
	move	r32, r2		/* start address of new kernel */

	/* Build a throwaway stack at the top of the page we are on. */
	shri	r1, r1, PAGE_SHIFT
	addi	r1, r1, 1
	shli	sp, r1, PAGE_SHIFT
	addi	sp, sp, -8
	/* we now have a stack (whether we need one or not) */

	/* r40 = hv console putc entry, used for progress/error output. */
	moveli	r40, lo16(___hv_console_putc)
	auli	r40, r40, ha16(___hv_console_putc)

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'r'
	jalr	r40

	moveli	r0, '_'
	jalr	r40

	moveli	r0, 'n'
	jalr	r40

	moveli	r0, '_'
	jalr	r40

	moveli	r0, 'k'
	jalr	r40

	moveli	r0, '\n'
	jalr	r40
#endif

	/*
	 * Throughout this code r30 is pointer to the element of page
	 * list we are working on.
	 *
	 * Normally we get to the next element of the page list by
	 * incrementing r30 by four.  The exception is if the element
	 * on the page list is an IND_INDIRECTION in which case we use
	 * the element with the low bits masked off as the new value
	 * of r30.
	 *
	 * To get this started, we need the value passed to us (which
	 * will always be an IND_INDIRECTION) in memory somewhere with
	 * r30 pointing at it.  To do that, we push the value passed
	 * to us on the stack and make r30 point to it.
	 */

	sw	sp, r30
	move	r30, sp
	addi	sp, sp, -8

#if CHIP_HAS_CBOX_HOME_MAP()
	/*
	 * On TILEPro, we need to flush all tiles' caches, since we may
	 * have been doing hash-for-home caching there.  Note that we
	 * must do this _after_ we're completely done modifying any memory
	 * other than our output buffer (which we know is locally cached).
	 * We want the caches to be fully clean when we do the reexec,
	 * because the hypervisor is going to do this flush again at that
	 * point, and we don't want that second flush to overwrite any memory.
	 */
	{
	 move	r0, zero	 /* cache_pa */
	 move	r1, zero
	}
	{
	 auli	r2, zero, ha16(HV_FLUSH_EVICT_L2)	/* cache_control */
	 movei	r3, -1		/* cache_cpumask; -1 means all client tiles */
	}
	{
	 move	r4, zero	 /* tlb_va */
	 move	r5, zero	 /* tlb_length */
	}
	{
	 move	r6, zero	 /* tlb_pgsize */
	 move	r7, zero	 /* tlb_cpumask */
	}
	{
	 move	r8, zero	 /* asids */
	 moveli	r20, lo16(___hv_flush_remote)
	}
	{
	 move	r9, zero	 /* asidcount */
	 auli	r20, r20, ha16(___hv_flush_remote)
	}

	jalr	r20
#endif

	/* r33 is destination pointer, default to zero */

	moveli	r33, 0

.Lloop:	lw	r10, r30

	andi	r9, r10, 0xf	/* low 4 bits tell us what type it is */
	xor	r10, r10, r9	/* r10 is now value with low 4 bits stripped */

	seqi	r0, r9, 0x1	/* IND_DESTINATION */
	bzt	r0, .Ltry2

	/* IND_DESTINATION: set the copy target. */
	move	r33, r10

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'd'
	jalr	r40
#endif

	addi	r30, r30, 4
	j	.Lloop

.Ltry2:
	seqi	r0, r9, 0x2	/* IND_INDIRECTION */
	bzt	r0, .Ltry4

	/* IND_INDIRECTION: continue walking at the pointed-to page. */
	move	r30, r10

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'i'
	jalr	r40
#endif

	j	.Lloop

.Ltry4:
	seqi	r0, r9, 0x4	/* IND_DONE */
	bzt	r0, .Ltry8

	/* IND_DONE: fence, then reexec into the new kernel. */
	mf

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 'D'
	jalr	r40
	moveli	r0, '\n'
	jalr	r40
#endif

	move	r0, r32
	moveli	r1, 0		/* arg to hv_reexec is 64 bits */

	moveli	r41, lo16(___hv_reexec)
	auli	r41, r41, ha16(___hv_reexec)

	jalr	r41

	/* we should not get here */

	moveli	r0, '?'
	jalr	r40
	moveli	r0, '\n'
	jalr	r40

	j	.Lhalt

.Ltry8:	seqi	r0, r9, 0x8	/* IND_SOURCE */
	bz	r0, .Lerr	/* unknown type */

	/* copy page at r10 to page at r33 */

	move	r11, r33

	moveli	r0, lo16(PAGE_SIZE)
	auli	r0, r0, ha16(PAGE_SIZE)
	add	r33, r33, r0

	/* copy word at r10 to word at r11 until r11 equals r33 */

	/* We know page size must be multiple of 16, so we can unroll
	 * 16 times safely without any edge case checking.
	 *
	 * Issue a flush of the destination every 16 words to avoid
	 * incoherence when starting the new kernel.  (Now this is
	 * just good paranoia because the hv_reexec call will also
	 * take care of this.)
	 */

1:
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0; addi	r11, r11, 4 }
	{ lw	r0, r10; addi	r10, r10, 4 }
	{ sw	r11, r0 }
	{ flush r11	; addi	r11, r11, 4 }

	seq	r0, r33, r11
	bzt	r0, 1b

#ifdef RELOCATE_NEW_KERNEL_VERBOSE
	moveli	r0, 's'
	jalr	r40
#endif

	addi	r30, r30, 4
	j	.Lloop


.Lerr:	moveli	r0, 'e'
	jalr	r40
	moveli	r0, 'r'
	jalr	r40
	moveli	r0, 'r'
	jalr	r40
	moveli	r0, '\n'
	jalr	r40
.Lhalt:
	moveli	r41, lo16(___hv_halt)
	auli	r41, r41, ha16(___hv_halt)

	jalr	r41
	STD_ENDPROC(relocate_new_kernel)

	.section .rodata,"a"

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long .Lend_relocate_new_kernel - relocate_new_kernel
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c new file mode 100644 index 000000000000..4dd21c1e6d5e --- /dev/null +++ b/arch/tile/kernel/setup.c | |||
@@ -0,0 +1,1511 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/mmzone.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/node.h> | ||
21 | #include <linux/cpu.h> | ||
22 | #include <linux/ioport.h> | ||
23 | #include <linux/irq.h> | ||
24 | #include <linux/kexec.h> | ||
25 | #include <linux/pci.h> | ||
26 | #include <linux/initrd.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/highmem.h> | ||
29 | #include <linux/smp.h> | ||
30 | #include <linux/timex.h> | ||
31 | #include <asm/setup.h> | ||
32 | #include <asm/sections.h> | ||
33 | #include <asm/sections.h> | ||
34 | #include <asm/cacheflush.h> | ||
35 | #include <asm/cacheflush.h> | ||
36 | #include <asm/pgalloc.h> | ||
37 | #include <asm/mmu_context.h> | ||
38 | #include <hv/hypervisor.h> | ||
39 | #include <arch/interrupts.h> | ||
40 | |||
41 | /* <linux/smp.h> doesn't provide this definition. */ | ||
42 | #ifndef CONFIG_SMP | ||
43 | #define setup_max_cpus 1 | ||
44 | #endif | ||
45 | |||
/* Absolute value of an int.  (Note: result is undefined for INT_MIN.) */
static inline int ABS(int x)
{
	if (x < 0)
		return -x;
	return x;
}
47 | |||
48 | /* Chip information */ | ||
49 | char chip_model[64] __write_once; | ||
50 | |||
51 | struct pglist_data node_data[MAX_NUMNODES] __read_mostly; | ||
52 | EXPORT_SYMBOL(node_data); | ||
53 | |||
54 | /* We only create bootmem data on node 0. */ | ||
55 | static bootmem_data_t __initdata node0_bdata; | ||
56 | |||
57 | /* Information on the NUMA nodes that we compute early */ | ||
58 | unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES]; | ||
59 | unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES]; | ||
60 | unsigned long __initdata node_memmap_pfn[MAX_NUMNODES]; | ||
61 | unsigned long __initdata node_percpu_pfn[MAX_NUMNODES]; | ||
62 | unsigned long __initdata node_free_pfn[MAX_NUMNODES]; | ||
63 | |||
64 | #ifdef CONFIG_HIGHMEM | ||
65 | /* Page frame index of end of lowmem on each controller. */ | ||
66 | unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES]; | ||
67 | |||
68 | /* Number of pages that can be mapped into lowmem. */ | ||
69 | static unsigned long __initdata mappable_physpages; | ||
70 | #endif | ||
71 | |||
72 | /* Data on which physical memory controller corresponds to which NUMA node */ | ||
73 | int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 }; | ||
74 | |||
75 | #ifdef CONFIG_HIGHMEM | ||
76 | /* Map information from VAs to PAs */ | ||
77 | unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)] | ||
78 | __write_once __attribute__((aligned(L2_CACHE_BYTES))); | ||
79 | EXPORT_SYMBOL(pbase_map); | ||
80 | |||
81 | /* Map information from PAs to VAs */ | ||
82 | void *vbase_map[NR_PA_HIGHBIT_VALUES] | ||
83 | __write_once __attribute__((aligned(L2_CACHE_BYTES))); | ||
84 | EXPORT_SYMBOL(vbase_map); | ||
85 | #endif | ||
86 | |||
87 | /* Node number as a function of the high PA bits */ | ||
88 | int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once; | ||
89 | EXPORT_SYMBOL(highbits_to_node); | ||
90 | |||
91 | static unsigned int __initdata maxmem_pfn = -1U; | ||
92 | static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = { | ||
93 | [0 ... MAX_NUMNODES-1] = -1U | ||
94 | }; | ||
95 | static nodemask_t __initdata isolnodes; | ||
96 | |||
97 | #ifdef CONFIG_PCI | ||
98 | enum { DEFAULT_PCI_RESERVE_MB = 64 }; | ||
99 | static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB; | ||
100 | unsigned long __initdata pci_reserve_start_pfn = -1U; | ||
101 | unsigned long __initdata pci_reserve_end_pfn = -1U; | ||
102 | #endif | ||
103 | |||
/*
 * "maxmem=<MB>" boot argument: cap the total RAM used by the kernel.
 * The value (in megabytes) is rounded down to a whole number of huge
 * pages and stored in maxmem_pfn as a page-frame count.
 */
static int __init setup_maxmem(char *str)
{
	long maxmem_mb;
	if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
	    maxmem_mb == 0)
		return -EINVAL;

	/* Round down to a multiple of the huge page size, then to pages. */
	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used to no more than %dMB\n",
		maxmem_pfn >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxmem", setup_maxmem);
118 | |||
/*
 * "maxnodemem=<node>:<MB>" boot argument: cap RAM used on one NUMA
 * node, rounded down to a whole number of huge pages (cf. setup_maxmem).
 */
static int __init setup_maxnodemem(char *str)
{
	char *endp;
	long maxnodemem_mb, node;

	/* INT_MAX when str is NULL guarantees the range check fails. */
	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
	if (node >= MAX_NUMNODES || *endp != ':' ||
	    strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
		return -EINVAL;

	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxnodemem", setup_maxnodemem);
136 | |||
/*
 * "isolnodes=<nodelist>" boot argument: record the given NUMA nodes in
 * the "isolnodes" mask (used later, e.g. by setup_pa_va_mapping(), to
 * treat those controllers as highmem-only).
 */
static int __init setup_isolnodes(char *str)
{
	char buf[MAX_NUMNODES * 5];
	if (str == NULL || nodelist_parse(str, isolnodes) != 0)
		return -EINVAL;

	/* Echo back the parsed list so typos are visible in the boot log. */
	nodelist_scnprintf(buf, sizeof(buf), isolnodes);
	pr_info("Set isolnodes value to '%s'\n", buf);
	return 0;
}
early_param("isolnodes", setup_isolnodes);
148 | |||
#ifdef CONFIG_PCI
/*
 * "pci_reserve=<MB>" boot argument: size of the region reserved for
 * PCIE root complex mappings.  Rejects zero/unparseable values and
 * anything over 3GB.
 */
static int __init setup_pci_reserve(char* str)
{
	unsigned long mb;

	if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
	    mb > 3 * 1024)
		return -EINVAL;

	pci_reserve_mb = mb;
	pr_info("Reserving %dMB for PCIE root complex mappings\n",
		pci_reserve_mb);
	return 0;
}
early_param("pci_reserve", setup_pci_reserve);
#endif
165 | |||
#ifndef __tilegx__
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area.
 * The size is rounded up to a whole number of pgdir entries.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;

	/* See validate_va() for more on this test. */
	if ((long)_VMALLOC_START >= 0)
		early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
			    VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);

	return 0;
}
early_param("vmalloc", parse_vmalloc);
#endif
187 | |||
#ifdef CONFIG_HIGHMEM
/*
 * Determine for each controller where its lowmem is mapped and how
 * much of it is mapped there.  On controller zero, the first few
 * megabytes are mapped at 0xfd000000 as code, so in principle we
 * could start our data mappings higher up, but for now we don't
 * bother, to avoid additional confusion.
 *
 * One question is whether, on systems with more than 768 Mb and
 * controllers of different sizes, to map in a proportionate amount of
 * each one, or to try to map the same amount from each controller.
 * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
 * respectively, do we map 256MB from each, or do we map 128 MB, 512
 * MB, and 128 MB respectively?)  For now we use a proportionate
 * solution like the latter.
 *
 * The VA/PA mapping demands that we align our decisions at 16 MB
 * boundaries so that we can rapidly convert VA to PA.
 */
static void *__init setup_pa_va_mapping(void)
{
	unsigned long curr_pages = 0;
	unsigned long vaddr = PAGE_OFFSET;
	nodemask_t highonlynodes = isolnodes;
	int i, j;

	/* -1 in every slot marks "no mapping established yet". */
	memset(pbase_map, -1, sizeof(pbase_map));
	memset(vbase_map, -1, sizeof(vbase_map));

	/* Node zero cannot be isolated for LOWMEM purposes. */
	node_clear(0, highonlynodes);

	/* Count up the number of pages on non-highonlynodes controllers. */
	mappable_physpages = 0;
	for_each_online_node(i) {
		if (!node_isset(i, highonlynodes))
			mappable_physpages +=
				node_end_pfn[i] - node_start_pfn[i];
	}

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
		unsigned long size = end - start;
		unsigned long vaddr_end;

		if (node_isset(i, highonlynodes)) {
			/* Mark this controller as having no lowmem. */
			node_lowmem_end_pfn[i] = start;
			continue;
		}

		/*
		 * Give each controller a proportionate share of the
		 * MAXMEM_PFN lowmem budget when total mappable memory
		 * exceeds it; otherwise map everything.  curr_pages is
		 * the running total, so vaddr_end is this controller's
		 * cumulative upper bound.
		 */
		curr_pages += size;
		if (mappable_physpages > MAXMEM_PFN) {
			vaddr_end = PAGE_OFFSET +
				(((u64)curr_pages * MAXMEM_PFN /
				  mappable_physpages)
				 << PAGE_SHIFT);
		} else {
			vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
		}
		/*
		 * Walk the lowmem VA range one huge page at a time,
		 * recording the VA->PFN mapping and (first time each
		 * highbits value is seen) the PFN->VA reverse mapping.
		 */
		for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
			unsigned long this_pfn =
				start + (j << HUGETLB_PAGE_ORDER);
			pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
			if (vbase_map[__pfn_to_highbits(this_pfn)] ==
			    (void *)-1)
				vbase_map[__pfn_to_highbits(this_pfn)] =
					(void *)(vaddr & HPAGE_MASK);
		}
		node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
		BUG_ON(node_lowmem_end_pfn[i] > end);
	}

	/* Return highest address of any mapped memory. */
	return (void *)vaddr;
}
#endif /* CONFIG_HIGHMEM */
266 | |||
267 | /* | ||
268 | * Register our most important memory mappings with the debug stub. | ||
269 | * | ||
270 | * This is up to 4 mappings for lowmem, one mapping per memory | ||
271 | * controller, plus one for our text segment. | ||
272 | */ | ||
273 | static void __cpuinit store_permanent_mappings(void) | ||
274 | { | ||
275 | int i; | ||
276 | |||
277 | for_each_online_node(i) { | ||
278 | HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT; | ||
279 | #ifdef CONFIG_HIGHMEM | ||
280 | HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i]; | ||
281 | #else | ||
282 | HV_PhysAddr high_mapped_pa = node_end_pfn[i]; | ||
283 | #endif | ||
284 | |||
285 | unsigned long pages = high_mapped_pa - node_start_pfn[i]; | ||
286 | HV_VirtAddr addr = (HV_VirtAddr) __va(pa); | ||
287 | hv_store_mapping(addr, pages << PAGE_SHIFT, pa); | ||
288 | } | ||
289 | |||
290 | hv_store_mapping((HV_VirtAddr)_stext, | ||
291 | (uint32_t)(_einittext - _stext), 0); | ||
292 | } | ||
293 | |||
294 | /* | ||
295 | * Use hv_inquire_physical() to populate node_{start,end}_pfn[] | ||
296 | * and node_online_map, doing suitable sanity-checking. | ||
297 | * Also set min_low_pfn, max_low_pfn, and max_pfn. | ||
298 | */ | ||
299 | static void __init setup_memory(void) | ||
300 | { | ||
301 | int i, j; | ||
302 | int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 }; | ||
303 | #ifdef CONFIG_HIGHMEM | ||
304 | long highmem_pages; | ||
305 | #endif | ||
306 | #ifndef __tilegx__ | ||
307 | int cap; | ||
308 | #endif | ||
309 | #if defined(CONFIG_HIGHMEM) || defined(__tilegx__) | ||
310 | long lowmem_pages; | ||
311 | #endif | ||
312 | |||
313 | /* We are using a char to hold the cpu_2_node[] mapping */ | ||
314 | BUG_ON(MAX_NUMNODES > 127); | ||
315 | |||
316 | /* Discover the ranges of memory available to us */ | ||
317 | for (i = 0; ; ++i) { | ||
318 | unsigned long start, size, end, highbits; | ||
319 | HV_PhysAddrRange range = hv_inquire_physical(i); | ||
320 | if (range.size == 0) | ||
321 | break; | ||
322 | #ifdef CONFIG_FLATMEM | ||
323 | if (i > 0) { | ||
324 | pr_err("Can't use discontiguous PAs: %#llx..%#llx\n", | ||
325 | range.size, range.start + range.size); | ||
326 | continue; | ||
327 | } | ||
328 | #endif | ||
329 | #ifndef __tilegx__ | ||
330 | if ((unsigned long)range.start) { | ||
331 | pr_err("Range not at 4GB multiple: %#llx..%#llx\n", | ||
332 | range.start, range.start + range.size); | ||
333 | continue; | ||
334 | } | ||
335 | #endif | ||
336 | if ((range.start & (HPAGE_SIZE-1)) != 0 || | ||
337 | (range.size & (HPAGE_SIZE-1)) != 0) { | ||
338 | unsigned long long start_pa = range.start; | ||
339 | unsigned long long orig_size = range.size; | ||
340 | range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK; | ||
341 | range.size -= (range.start - start_pa); | ||
342 | range.size &= HPAGE_MASK; | ||
343 | pr_err("Range not hugepage-aligned: %#llx..%#llx:" | ||
344 | " now %#llx-%#llx\n", | ||
345 | start_pa, start_pa + orig_size, | ||
346 | range.start, range.start + range.size); | ||
347 | } | ||
348 | highbits = __pa_to_highbits(range.start); | ||
349 | if (highbits >= NR_PA_HIGHBIT_VALUES) { | ||
350 | pr_err("PA high bits too high: %#llx..%#llx\n", | ||
351 | range.start, range.start + range.size); | ||
352 | continue; | ||
353 | } | ||
354 | if (highbits_seen[highbits]) { | ||
355 | pr_err("Range overlaps in high bits: %#llx..%#llx\n", | ||
356 | range.start, range.start + range.size); | ||
357 | continue; | ||
358 | } | ||
359 | highbits_seen[highbits] = 1; | ||
360 | if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) { | ||
361 | int max_size = maxnodemem_pfn[i]; | ||
362 | if (max_size > 0) { | ||
363 | pr_err("Maxnodemem reduced node %d to" | ||
364 | " %d pages\n", i, max_size); | ||
365 | range.size = PFN_PHYS(max_size); | ||
366 | } else { | ||
367 | pr_err("Maxnodemem disabled node %d\n", i); | ||
368 | continue; | ||
369 | } | ||
370 | } | ||
371 | if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) { | ||
372 | int max_size = maxmem_pfn - num_physpages; | ||
373 | if (max_size > 0) { | ||
374 | pr_err("Maxmem reduced node %d to %d pages\n", | ||
375 | i, max_size); | ||
376 | range.size = PFN_PHYS(max_size); | ||
377 | } else { | ||
378 | pr_err("Maxmem disabled node %d\n", i); | ||
379 | continue; | ||
380 | } | ||
381 | } | ||
382 | if (i >= MAX_NUMNODES) { | ||
383 | pr_err("Too many PA nodes (#%d): %#llx...%#llx\n", | ||
384 | i, range.size, range.size + range.start); | ||
385 | continue; | ||
386 | } | ||
387 | |||
388 | start = range.start >> PAGE_SHIFT; | ||
389 | size = range.size >> PAGE_SHIFT; | ||
390 | end = start + size; | ||
391 | |||
392 | #ifndef __tilegx__ | ||
393 | if (((HV_PhysAddr)end << PAGE_SHIFT) != | ||
394 | (range.start + range.size)) { | ||
395 | pr_err("PAs too high to represent: %#llx..%#llx\n", | ||
396 | range.start, range.start + range.size); | ||
397 | continue; | ||
398 | } | ||
399 | #endif | ||
400 | #ifdef CONFIG_PCI | ||
401 | /* | ||
402 | * Blocks that overlap the pci reserved region must | ||
403 | * have enough space to hold the maximum percpu data | ||
404 | * region at the top of the range. If there isn't | ||
405 | * enough space above the reserved region, just | ||
406 | * truncate the node. | ||
407 | */ | ||
408 | if (start <= pci_reserve_start_pfn && | ||
409 | end > pci_reserve_start_pfn) { | ||
410 | unsigned int per_cpu_size = | ||
411 | __per_cpu_end - __per_cpu_start; | ||
412 | unsigned int percpu_pages = | ||
413 | NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT); | ||
414 | if (end < pci_reserve_end_pfn + percpu_pages) { | ||
415 | end = pci_reserve_start_pfn; | ||
416 | pr_err("PCI mapping region reduced node %d to" | ||
417 | " %ld pages\n", i, end - start); | ||
418 | } | ||
419 | } | ||
420 | #endif | ||
421 | |||
422 | for (j = __pfn_to_highbits(start); | ||
423 | j <= __pfn_to_highbits(end - 1); j++) | ||
424 | highbits_to_node[j] = i; | ||
425 | |||
426 | node_start_pfn[i] = start; | ||
427 | node_end_pfn[i] = end; | ||
428 | node_controller[i] = range.controller; | ||
429 | num_physpages += size; | ||
430 | max_pfn = end; | ||
431 | |||
432 | /* Mark node as online */ | ||
433 | node_set(i, node_online_map); | ||
434 | node_set(i, node_possible_map); | ||
435 | } | ||
436 | |||
437 | #ifndef __tilegx__ | ||
438 | /* | ||
439 | * For 4KB pages, mem_map "struct page" data is 1% of the size | ||
440 | * of the physical memory, so can be quite big (640 MB for | ||
441 | * four 16G zones). These structures must be mapped in | ||
442 | * lowmem, and since we currently cap out at about 768 MB, | ||
443 | * it's impractical to try to use this much address space. | ||
444 | * For now, arbitrarily cap the amount of physical memory | ||
445 | * we're willing to use at 8 million pages (32GB of 4KB pages). | ||
446 | */ | ||
447 | cap = 8 * 1024 * 1024; /* 8 million pages */ | ||
448 | if (num_physpages > cap) { | ||
449 | int num_nodes = num_online_nodes(); | ||
450 | int cap_each = cap / num_nodes; | ||
451 | unsigned long dropped_pages = 0; | ||
452 | for (i = 0; i < num_nodes; ++i) { | ||
453 | int size = node_end_pfn[i] - node_start_pfn[i]; | ||
454 | if (size > cap_each) { | ||
455 | dropped_pages += (size - cap_each); | ||
456 | node_end_pfn[i] = node_start_pfn[i] + cap_each; | ||
457 | } | ||
458 | } | ||
459 | num_physpages -= dropped_pages; | ||
460 | pr_warning("Only using %ldMB memory;" | ||
461 | " ignoring %ldMB.\n", | ||
462 | num_physpages >> (20 - PAGE_SHIFT), | ||
463 | dropped_pages >> (20 - PAGE_SHIFT)); | ||
464 | pr_warning("Consider using a larger page size.\n"); | ||
465 | } | ||
466 | #endif | ||
467 | |||
468 | /* Heap starts just above the last loaded address. */ | ||
469 | min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET); | ||
470 | |||
471 | #ifdef CONFIG_HIGHMEM | ||
472 | /* Find where we map lowmem from each controller. */ | ||
473 | high_memory = setup_pa_va_mapping(); | ||
474 | |||
475 | /* Set max_low_pfn based on what node 0 can directly address. */ | ||
476 | max_low_pfn = node_lowmem_end_pfn[0]; | ||
477 | |||
478 | lowmem_pages = (mappable_physpages > MAXMEM_PFN) ? | ||
479 | MAXMEM_PFN : mappable_physpages; | ||
480 | highmem_pages = (long) (num_physpages - lowmem_pages); | ||
481 | |||
482 | pr_notice("%ldMB HIGHMEM available.\n", | ||
483 | pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); | ||
484 | pr_notice("%ldMB LOWMEM available.\n", | ||
485 | pages_to_mb(lowmem_pages)); | ||
486 | #else | ||
487 | /* Set max_low_pfn based on what node 0 can directly address. */ | ||
488 | max_low_pfn = node_end_pfn[0]; | ||
489 | |||
490 | #ifndef __tilegx__ | ||
491 | if (node_end_pfn[0] > MAXMEM_PFN) { | ||
492 | pr_warning("Only using %ldMB LOWMEM.\n", | ||
493 | MAXMEM>>20); | ||
494 | pr_warning("Use a HIGHMEM enabled kernel.\n"); | ||
495 | max_low_pfn = MAXMEM_PFN; | ||
496 | max_pfn = MAXMEM_PFN; | ||
497 | num_physpages = MAXMEM_PFN; | ||
498 | node_end_pfn[0] = MAXMEM_PFN; | ||
499 | } else { | ||
500 | pr_notice("%ldMB memory available.\n", | ||
501 | pages_to_mb(node_end_pfn[0])); | ||
502 | } | ||
503 | for (i = 1; i < MAX_NUMNODES; ++i) { | ||
504 | node_start_pfn[i] = 0; | ||
505 | node_end_pfn[i] = 0; | ||
506 | } | ||
507 | high_memory = __va(node_end_pfn[0]); | ||
508 | #else | ||
509 | lowmem_pages = 0; | ||
510 | for (i = 0; i < MAX_NUMNODES; ++i) { | ||
511 | int pages = node_end_pfn[i] - node_start_pfn[i]; | ||
512 | lowmem_pages += pages; | ||
513 | if (pages) | ||
514 | high_memory = pfn_to_kaddr(node_end_pfn[i]); | ||
515 | } | ||
516 | pr_notice("%ldMB memory available.\n", | ||
517 | pages_to_mb(lowmem_pages)); | ||
518 | #endif | ||
519 | #endif | ||
520 | } | ||
521 | |||
522 | static void __init setup_bootmem_allocator(void) | ||
523 | { | ||
524 | unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn; | ||
525 | |||
526 | /* Provide a node 0 bdata. */ | ||
527 | NODE_DATA(0)->bdata = &node0_bdata; | ||
528 | |||
529 | #ifdef CONFIG_PCI | ||
530 | /* Don't let boot memory alias the PCI region. */ | ||
531 | last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn); | ||
532 | #else | ||
533 | last_alloc_pfn = max_low_pfn; | ||
534 | #endif | ||
535 | |||
536 | /* | ||
537 | * Initialize the boot-time allocator (with low memory only): | ||
538 | * The first argument says where to put the bitmap, and the | ||
539 | * second says where the end of allocatable memory is. | ||
540 | */ | ||
541 | bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn); | ||
542 | |||
543 | /* | ||
544 | * Let the bootmem allocator use all the space we've given it | ||
545 | * except for its own bitmap. | ||
546 | */ | ||
547 | first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size); | ||
548 | if (first_alloc_pfn >= last_alloc_pfn) | ||
549 | early_panic("Not enough memory on controller 0 for bootmem\n"); | ||
550 | |||
551 | free_bootmem(PFN_PHYS(first_alloc_pfn), | ||
552 | PFN_PHYS(last_alloc_pfn - first_alloc_pfn)); | ||
553 | |||
554 | #ifdef CONFIG_KEXEC | ||
555 | if (crashk_res.start != crashk_res.end) | ||
556 | reserve_bootmem(crashk_res.start, | ||
557 | crashk_res.end - crashk_res.start + 1, 0); | ||
558 | #endif | ||
559 | |||
560 | } | ||
561 | |||
562 | void *__init alloc_remap(int nid, unsigned long size) | ||
563 | { | ||
564 | int pages = node_end_pfn[nid] - node_start_pfn[nid]; | ||
565 | void *map = pfn_to_kaddr(node_memmap_pfn[nid]); | ||
566 | BUG_ON(size != pages * sizeof(struct page)); | ||
567 | memset(map, 0, size); | ||
568 | return map; | ||
569 | } | ||
570 | |||
571 | static int __init percpu_size(void) | ||
572 | { | ||
573 | int size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE); | ||
574 | #ifdef CONFIG_MODULES | ||
575 | if (size < PERCPU_ENOUGH_ROOM) | ||
576 | size = PERCPU_ENOUGH_ROOM; | ||
577 | #endif | ||
578 | /* In several places we assume the per-cpu data fits on a huge page. */ | ||
579 | BUG_ON(kdata_huge && size > HPAGE_SIZE); | ||
580 | return size; | ||
581 | } | ||
582 | |||
583 | static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal) | ||
584 | { | ||
585 | void *kva = __alloc_bootmem(size, PAGE_SIZE, goal); | ||
586 | unsigned long pfn = kaddr_to_pfn(kva); | ||
587 | BUG_ON(goal && PFN_PHYS(pfn) != goal); | ||
588 | return pfn; | ||
589 | } | ||
590 | |||
591 | static void __init zone_sizes_init(void) | ||
592 | { | ||
593 | unsigned long zones_size[MAX_NR_ZONES] = { 0 }; | ||
594 | unsigned long node_percpu[MAX_NUMNODES] = { 0 }; | ||
595 | int size = percpu_size(); | ||
596 | int num_cpus = smp_height * smp_width; | ||
597 | int i; | ||
598 | |||
599 | for (i = 0; i < num_cpus; ++i) | ||
600 | node_percpu[cpu_to_node(i)] += size; | ||
601 | |||
602 | for_each_online_node(i) { | ||
603 | unsigned long start = node_start_pfn[i]; | ||
604 | unsigned long end = node_end_pfn[i]; | ||
605 | #ifdef CONFIG_HIGHMEM | ||
606 | unsigned long lowmem_end = node_lowmem_end_pfn[i]; | ||
607 | #else | ||
608 | unsigned long lowmem_end = end; | ||
609 | #endif | ||
610 | int memmap_size = (end - start) * sizeof(struct page); | ||
611 | node_free_pfn[i] = start; | ||
612 | |||
613 | /* | ||
614 | * Set aside pages for per-cpu data and the mem_map array. | ||
615 | * | ||
616 | * Since the per-cpu data requires special homecaching, | ||
617 | * if we are in kdata_huge mode, we put it at the end of | ||
618 | * the lowmem region. If we're not in kdata_huge mode, | ||
619 | * we take the per-cpu pages from the bottom of the | ||
620 | * controller, since that avoids fragmenting a huge page | ||
621 | * that users might want. We always take the memmap | ||
622 | * from the bottom of the controller, since with | ||
623 | * kdata_huge that lets it be under a huge TLB entry. | ||
624 | * | ||
625 | * If the user has requested isolnodes for a controller, | ||
626 | * though, there'll be no lowmem, so we just alloc_bootmem | ||
627 | * the memmap. There will be no percpu memory either. | ||
628 | */ | ||
629 | if (__pfn_to_highbits(start) == 0) { | ||
630 | /* In low PAs, allocate via bootmem. */ | ||
631 | unsigned long goal = 0; | ||
632 | node_memmap_pfn[i] = | ||
633 | alloc_bootmem_pfn(memmap_size, goal); | ||
634 | if (kdata_huge) | ||
635 | goal = PFN_PHYS(lowmem_end) - node_percpu[i]; | ||
636 | if (node_percpu[i]) | ||
637 | node_percpu_pfn[i] = | ||
638 | alloc_bootmem_pfn(node_percpu[i], goal); | ||
639 | } else if (cpu_isset(i, isolnodes)) { | ||
640 | node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0); | ||
641 | BUG_ON(node_percpu[i] != 0); | ||
642 | } else { | ||
643 | /* In high PAs, just reserve some pages. */ | ||
644 | node_memmap_pfn[i] = node_free_pfn[i]; | ||
645 | node_free_pfn[i] += PFN_UP(memmap_size); | ||
646 | if (!kdata_huge) { | ||
647 | node_percpu_pfn[i] = node_free_pfn[i]; | ||
648 | node_free_pfn[i] += PFN_UP(node_percpu[i]); | ||
649 | } else { | ||
650 | node_percpu_pfn[i] = | ||
651 | lowmem_end - PFN_UP(node_percpu[i]); | ||
652 | } | ||
653 | } | ||
654 | |||
655 | #ifdef CONFIG_HIGHMEM | ||
656 | if (start > lowmem_end) { | ||
657 | zones_size[ZONE_NORMAL] = 0; | ||
658 | zones_size[ZONE_HIGHMEM] = end - start; | ||
659 | } else { | ||
660 | zones_size[ZONE_NORMAL] = lowmem_end - start; | ||
661 | zones_size[ZONE_HIGHMEM] = end - lowmem_end; | ||
662 | } | ||
663 | #else | ||
664 | zones_size[ZONE_NORMAL] = end - start; | ||
665 | #endif | ||
666 | |||
667 | /* | ||
668 | * Everyone shares node 0's bootmem allocator, but | ||
669 | * we use alloc_remap(), above, to put the actual | ||
670 | * struct page array on the individual controllers, | ||
671 | * which is most of the data that we actually care about. | ||
672 | * We can't place bootmem allocators on the other | ||
673 | * controllers since the bootmem allocator can only | ||
674 | * operate on 32-bit physical addresses. | ||
675 | */ | ||
676 | NODE_DATA(i)->bdata = NODE_DATA(0)->bdata; | ||
677 | |||
678 | free_area_init_node(i, zones_size, start, NULL); | ||
679 | printk(KERN_DEBUG " DMA zone: %ld per-cpu pages\n", | ||
680 | PFN_UP(node_percpu[i])); | ||
681 | |||
682 | /* Track the type of memory on each node */ | ||
683 | if (zones_size[ZONE_NORMAL]) | ||
684 | node_set_state(i, N_NORMAL_MEMORY); | ||
685 | #ifdef CONFIG_HIGHMEM | ||
686 | if (end != start) | ||
687 | node_set_state(i, N_HIGH_MEMORY); | ||
688 | #endif | ||
689 | |||
690 | node_set_online(i); | ||
691 | } | ||
692 | } | ||
693 | |||
#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once;
EXPORT_SYMBOL(node_2_cpu_mask);

/*
 * which node each logical CPU is on; setup_numa_mapping() stores -1
 * here for cpus not yet (or never) assigned, and setup_memory() checks
 * MAX_NUMNODES <= 127 so a node id always fits.
 * NOTE(review): assumes plain char is signed on this platform so the
 * -1 sentinel survives -- confirm for the tile toolchain.
 */
char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(cpu_2_node);
703 | |||
704 | /* Return cpu_to_node() except for cpus not yet assigned, which return -1 */ | ||
705 | static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus) | ||
706 | { | ||
707 | if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus)) | ||
708 | return -1; | ||
709 | else | ||
710 | return cpu_to_node(cpu); | ||
711 | } | ||
712 | |||
713 | /* Return number of immediately-adjacent tiles sharing the same NUMA node. */ | ||
714 | static int __init node_neighbors(int node, int cpu, | ||
715 | struct cpumask *unbound_cpus) | ||
716 | { | ||
717 | int neighbors = 0; | ||
718 | int w = smp_width; | ||
719 | int h = smp_height; | ||
720 | int x = cpu % w; | ||
721 | int y = cpu / w; | ||
722 | if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node) | ||
723 | ++neighbors; | ||
724 | if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node) | ||
725 | ++neighbors; | ||
726 | if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node) | ||
727 | ++neighbors; | ||
728 | if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node) | ||
729 | ++neighbors; | ||
730 | return neighbors; | ||
731 | } | ||
732 | |||
/*
 * Assign every possible cpu to a NUMA node, preferring the node whose
 * memory controller is physically closest on the mesh, and record the
 * result in cpu_2_node[] / node_2_cpu_mask[].
 */
static void __init setup_numa_mapping(void)
{
	/*
	 * NOTE(review): distance[] lives on the stack and is
	 * MAX_NUMNODES * NR_CPUS ints -- worth confirming this stays
	 * within the boot stack for large NR_CPUS configs.
	 */
	int distance[MAX_NUMNODES][NR_CPUS];
	HV_Coord coord;
	int cpu, node, cpus, i, x, y;
	int num_nodes = num_online_nodes();
	struct cpumask unbound_cpus;
	nodemask_t default_nodes;

	cpumask_clear(&unbound_cpus);

	/* Get set of nodes we will use for defaults */
	nodes_andnot(default_nodes, node_online_map, isolnodes);
	if (nodes_empty(default_nodes)) {
		BUG_ON(!node_isset(0, node_online_map));
		pr_err("Forcing NUMA node zero available as a default node\n");
		node_set(0, default_nodes);
	}

	/* Populate the distance[] array */
	memset(distance, -1, sizeof(distance));
	cpu = 0;
	for (coord.y = 0; coord.y < smp_height; ++coord.y) {
		for (coord.x = 0; coord.x < smp_width;
		     ++coord.x, ++cpu) {
			BUG_ON(cpu >= nr_cpu_ids);
			if (!cpu_possible(cpu)) {
				/* -1 marks a cpu with no node assignment. */
				cpu_2_node[cpu] = -1;
				continue;
			}
			/* Manhattan distance from this tile to each
			 * candidate node's memory controller. */
			for_each_node_mask(node, default_nodes) {
				HV_MemoryControllerInfo info =
					hv_inquire_memory_controller(
						coord, node_controller[node]);
				distance[node][cpu] =
					ABS(info.coord.x) + ABS(info.coord.y);
			}
			cpumask_set_cpu(cpu, &unbound_cpus);
		}
	}
	cpus = cpu;

	/*
	 * Round-robin through the NUMA nodes until all the cpus are
	 * assigned.  We could be more clever here (e.g. create four
	 * sorted linked lists on the same set of cpu nodes, and pull
	 * off them in round-robin sequence, removing from all four
	 * lists each time) but given the relatively small numbers
	 * involved, O(n^2) seem OK for a one-time cost.
	 */
	node = first_node(default_nodes);
	while (!cpumask_empty(&unbound_cpus)) {
		int best_cpu = -1;
		int best_distance = INT_MAX;
		for (cpu = 0; cpu < cpus; ++cpu) {
			if (cpumask_test_cpu(cpu, &unbound_cpus)) {
				/*
				 * Compute metric, which is how much
				 * closer the cpu is to this memory
				 * controller than the others, shifted
				 * up, and then the number of
				 * neighbors already in the node as an
				 * epsilon adjustment to try to keep
				 * the nodes compact.
				 */
				int d = distance[node][cpu] * num_nodes;
				for_each_node_mask(i, default_nodes) {
					if (i != node)
						d -= distance[i][cpu];
				}
				d *= 8;  /* allow space for epsilon */
				d -= node_neighbors(node, cpu, &unbound_cpus);
				if (d < best_distance) {
					best_cpu = cpu;
					best_distance = d;
				}
			}
		}
		/* default_nodes is non-empty, so some cpu must win. */
		BUG_ON(best_cpu < 0);
		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
		cpu_2_node[best_cpu] = node;
		cpumask_clear_cpu(best_cpu, &unbound_cpus);
		/* Advance round-robin, wrapping back to the first node. */
		node = next_node(node, default_nodes);
		if (node == MAX_NUMNODES)
			node = first_node(default_nodes);
	}

	/* Print out node assignments and set defaults for disabled cpus */
	cpu = 0;
	for (y = 0; y < smp_height; ++y) {
		printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
		for (x = 0; x < smp_width; ++x, ++cpu) {
			if (cpu_to_node(cpu) < 0) {
				pr_cont(" -");
				cpu_2_node[cpu] = first_node(default_nodes);
			} else {
				pr_cont(" %d", cpu_to_node(cpu));
			}
		}
		pr_cont("\n");
	}
}
835 | |||
836 | static struct cpu cpu_devices[NR_CPUS]; | ||
837 | |||
838 | static int __init topology_init(void) | ||
839 | { | ||
840 | int i; | ||
841 | |||
842 | for_each_online_node(i) | ||
843 | register_one_node(i); | ||
844 | |||
845 | for_each_present_cpu(i) | ||
846 | register_cpu(&cpu_devices[i], i); | ||
847 | |||
848 | return 0; | ||
849 | } | ||
850 | |||
851 | subsys_initcall(topology_init); | ||
852 | |||
853 | #else /* !CONFIG_NUMA */ | ||
854 | |||
855 | #define setup_numa_mapping() do { } while (0) | ||
856 | |||
857 | #endif /* CONFIG_NUMA */ | ||
858 | |||
/**
 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
 * @boot: Is this the boot cpu?
 *
 * Called from setup_arch() on the boot cpu, or online_secondary().
 */
void __cpuinit setup_cpu(int boot)
{
	/* The boot cpu sets up its permanent mappings much earlier. */
	if (!boot)
		store_permanent_mappings();

	/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
	raw_local_irq_unmask(INT_DMATLB_MISS);
	raw_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
	raw_local_irq_unmask(INT_SNITLB_MISS);
#endif

	/*
	 * Allow user access to many generic SPRs, like the cycle
	 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
	 * NOTE(review): the *_SET_0 SPR writes presumably grant
	 * protection-level-0 (user) access -- confirm against the
	 * chip's SPR definitions.
	 */
	__insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
	/* Static network is not restricted. */
	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif
#if CHIP_HAS_SN_PROC()
	/* Likewise allow user-level static network notify/CPL access. */
	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
#endif

	/*
	 * Set the MPL for interrupt control 0 to user level.
	 * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs,
	 * as well as the PL 0 interrupt mask.
	 */
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);

	/* Initialize IRQ support for this cpu. */
	setup_irq_regs();

#ifdef CONFIG_HARDWALL
	/* Reset the network state on this cpu. */
	reset_network_state();
#endif
}
910 | |||
911 | static int __initdata set_initramfs_file; | ||
912 | static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; | ||
913 | |||
914 | static int __init setup_initramfs_file(char *str) | ||
915 | { | ||
916 | if (str == NULL) | ||
917 | return -EINVAL; | ||
918 | strncpy(initramfs_file, str, sizeof(initramfs_file) - 1); | ||
919 | set_initramfs_file = 1; | ||
920 | |||
921 | return 0; | ||
922 | } | ||
923 | early_param("initramfs_file", setup_initramfs_file); | ||
924 | |||
925 | /* | ||
926 | * We look for an additional "initramfs.cpio.gz" file in the hvfs. | ||
927 | * If there is one, we allocate some memory for it and it will be | ||
928 | * unpacked to the initramfs after any built-in initramfs_data. | ||
929 | */ | ||
930 | static void __init load_hv_initrd(void) | ||
931 | { | ||
932 | HV_FS_StatInfo stat; | ||
933 | int fd, rc; | ||
934 | void *initrd; | ||
935 | |||
936 | fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); | ||
937 | if (fd == HV_ENOENT) { | ||
938 | if (set_initramfs_file) | ||
939 | pr_warning("No such hvfs initramfs file '%s'\n", | ||
940 | initramfs_file); | ||
941 | return; | ||
942 | } | ||
943 | BUG_ON(fd < 0); | ||
944 | stat = hv_fs_fstat(fd); | ||
945 | BUG_ON(stat.size < 0); | ||
946 | if (stat.flags & HV_FS_ISDIR) { | ||
947 | pr_warning("Ignoring hvfs file '%s': it's a directory.\n", | ||
948 | initramfs_file); | ||
949 | return; | ||
950 | } | ||
951 | initrd = alloc_bootmem_pages(stat.size); | ||
952 | rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0); | ||
953 | if (rc != stat.size) { | ||
954 | pr_err("Error reading %d bytes from hvfs file '%s': %d\n", | ||
955 | stat.size, initramfs_file, rc); | ||
956 | free_bootmem((unsigned long) initrd, stat.size); | ||
957 | return; | ||
958 | } | ||
959 | initrd_start = (unsigned long) initrd; | ||
960 | initrd_end = initrd_start + stat.size; | ||
961 | } | ||
962 | |||
/*
 * Release the memory holding the initrd image: the range
 * [begin, end) is handed back to the bootmem allocator.
 */
void __init free_initrd_mem(unsigned long begin, unsigned long end)
{
	free_bootmem(begin, end - begin);
}
967 | |||
/*
 * Cross-check the compiled-in configuration against what the
 * hypervisor actually provides, panicking early on any mismatch,
 * and initialize the ASID range and chip model string.
 */
static void __init validate_hv(void)
{
	/*
	 * It may already be too late, but let's check our built-in
	 * configuration against what the hypervisor is providing.
	 */
	unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
	int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
	int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
	HV_ASIDRange asid_range;

#ifndef CONFIG_SMP
	/* A UP kernel must be running on the (0,0) tile of the grid. */
	HV_Topology topology = hv_inquire_topology();
	BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
	if (topology.width != 1 || topology.height != 1) {
		pr_warning("Warning: booting UP kernel on %dx%d grid;"
			   " will ignore all but first tile.\n",
			   topology.width, topology.height);
	}
#endif

	/* The hypervisor glue must fit below the kernel text. */
	if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
		early_panic("Hypervisor glue size %ld is too big!\n",
			    glue_size);
	/* Our page sizes must agree exactly with the hypervisor's. */
	if (hv_page_size != PAGE_SIZE)
		early_panic("Hypervisor page size %#x != our %#lx\n",
			    hv_page_size, PAGE_SIZE);
	if (hv_hpage_size != HPAGE_SIZE)
		early_panic("Hypervisor huge page size %#x != our %#lx\n",
			    hv_hpage_size, HPAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * Some hypervisor APIs take a pointer to a bitmap array
	 * whose size is at least the number of cpus on the chip.
	 * We use a struct cpumask for this, so it must be big enough.
	 */
	if ((smp_height * smp_width) > nr_cpu_ids)
		early_panic("Hypervisor %d x %d grid too big for Linux"
			    " NR_CPUS %d\n", smp_height, smp_width,
			    nr_cpu_ids);
#endif

	/*
	 * Check that we're using allowed ASIDs, and initialize the
	 * various asid variables to their appropriate initial states.
	 */
	asid_range = hv_inquire_asid(0);
	__get_cpu_var(current_asid) = min_asid = asid_range.start;
	max_asid = asid_range.start + asid_range.size - 1;

	/* Record the chip model string, falling back to "unknown". */
	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
		       sizeof(chip_model)) < 0) {
		pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
		strlcpy(chip_model, "unknown", sizeof(chip_model));
	}
}
1025 | |||
/*
 * Sanity-check the virtual address ranges the hypervisor grants us
 * against the layout this kernel was built for.
 */
static void __init validate_va(void)
{
#ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */
	/*
	 * Similarly, make sure we're only using allowed VAs.
	 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT,
	 * and 0 .. KERNEL_HIGH_VADDR.
	 * In addition, make sure we CAN'T use the end of memory, since
	 * we use the last chunk of each pgd for the pgd_list.
	 */
	int i, fc_fd_ok = 0;
	unsigned long max_va = 0;
	unsigned long list_va =
		((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT);

	/* Walk the hypervisor's VA ranges; a zero-sized range ends them. */
	for (i = 0; ; ++i) {
		HV_VirtAddrRange range = hv_inquire_virtual(i);
		if (range.size == 0)
			break;
		/* One range must span MEM_USER_INTRPT..MEM_HV_INTRPT. */
		if (range.start <= MEM_USER_INTRPT &&
		    range.start + range.size >= MEM_HV_INTRPT)
			fc_fd_ok = 1;
		/* A range starting at zero gives us the low-VA extent. */
		if (range.start == 0)
			max_va = range.size;
		/* No range may reach into the pgd_list chunk. */
		BUG_ON(range.start + range.size > list_va);
	}
	if (!fc_fd_ok)
		early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n");
	if (max_va == 0)
		early_panic("Hypervisor not configured for low VAs\n");
	if (max_va < KERNEL_HIGH_VADDR)
		early_panic("Hypervisor max VA %#lx smaller than %#lx\n",
			    max_va, KERNEL_HIGH_VADDR);

	/* Kernel PCs must have their high bit set; see intvec.S. */
	if ((long)VMALLOC_START >= 0)
		early_panic(
			"Linux VMALLOC region below the 2GB line (%#lx)!\n"
			"Reconfigure the kernel with fewer NR_HUGE_VMAPS\n"
			"or smaller VMALLOC_RESERVE.\n",
			VMALLOC_START);
#endif
}
1069 | |||
/*
 * cpu_lotar_map lists all the cpus that are valid for the supervisor
 * to cache data on at a page level, i.e. what cpus can be placed in
 * the LOTAR field of a PTE.  It is equivalent to the set of possible
 * cpus plus any other cpus that are willing to share their cache.
 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR).
 */
struct cpumask __write_once cpu_lotar_map;
EXPORT_SYMBOL(cpu_lotar_map);

#if CHIP_HAS_CBOX_HOME_MAP()
/*
 * hash_for_home_map lists all the tiles that hash-for-home data
 * will be cached on.  Note that this may include tiles that are not
 * valid for this supervisor to use otherwise (e.g. if a hypervisor
 * device is being shared between multiple supervisors).
 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE).
 */
struct cpumask hash_for_home_map;
EXPORT_SYMBOL(hash_for_home_map);
#endif

/*
 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
 * flush on our behalf.  It is set to cpu_possible_map OR'ed with
 * hash_for_home_map, and it is what should be passed to
 * hv_flush_remote() to flush all caches.  Note that if there are
 * dedicated hypervisor driver tiles that have authorized use of their
 * cache, those tiles will only appear in cpu_lotar_map, NOT in
 * cpu_cacheable_map, as they are a special case.
 */
struct cpumask __write_once cpu_cacheable_map;
EXPORT_SYMBOL(cpu_cacheable_map);

/* Cpus disabled via the "disabled_cpus=" boot argument; boot-time only. */
static __initdata struct cpumask disabled_map;
1105 | |||
1106 | static int __init disabled_cpus(char *str) | ||
1107 | { | ||
1108 | int boot_cpu = smp_processor_id(); | ||
1109 | |||
1110 | if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0) | ||
1111 | return -EINVAL; | ||
1112 | if (cpumask_test_cpu(boot_cpu, &disabled_map)) { | ||
1113 | pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu); | ||
1114 | cpumask_clear_cpu(boot_cpu, &disabled_map); | ||
1115 | } | ||
1116 | return 0; | ||
1117 | } | ||
1118 | |||
1119 | early_param("disabled_cpus", disabled_cpus); | ||
1120 | |||
1121 | void __init print_disabled_cpus(void) | ||
1122 | { | ||
1123 | if (!cpumask_empty(&disabled_map)) { | ||
1124 | char buf[100]; | ||
1125 | cpulist_scnprintf(buf, sizeof(buf), &disabled_map); | ||
1126 | pr_info("CPUs not available for Linux: %s\n", buf); | ||
1127 | } | ||
1128 | } | ||
1129 | |||
/*
 * Discover which cpus the hypervisor makes available, fold in the
 * user's "disabled_cpus"/"maxcpus" restrictions, and initialize the
 * cpu_possible, cpu_lotar, hash_for_home and cpu_cacheable maps.
 * Panics early if the boot cpu itself is unavailable.
 */
static void __init setup_cpu_maps(void)
{
	struct cpumask hv_disabled_map, cpu_possible_init;
	int boot_cpu = smp_processor_id();
	int cpus, i, rc;

	/* Learn which cpus are allowed by the hypervisor. */
	rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
			      (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
			      sizeof(cpu_cacheable_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
	if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
		early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);

	/* Compute the cpus disabled by the hvconfig file. */
	cpumask_complement(&hv_disabled_map, &cpu_possible_init);

	/* Include them with the cpus disabled by "disabled_cpus". */
	cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);

	/*
	 * Disable every cpu after "setup_max_cpus".  But don't mark
	 * as disabled the cpus that are outside of our initial rectangle,
	 * since that turns out to be confusing.
	 */
	cpus = 1;                          /* this cpu */
	cpumask_set_cpu(boot_cpu, &disabled_map);   /* ignore this cpu */
	/* Count enabled cpus up to the setup_max_cpus limit... */
	for (i = 0; cpus < setup_max_cpus; ++i)
		if (!cpumask_test_cpu(i, &disabled_map))
			++cpus;
	/* ...then disable the remainder of the grid. */
	for (; i < smp_height * smp_width; ++i)
		cpumask_set_cpu(i, &disabled_map);
	cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
	/* Cpus beyond the grid are not "disabled", just nonexistent. */
	for (i = smp_height * smp_width; i < NR_CPUS; ++i)
		cpumask_clear_cpu(i, &disabled_map);

	/*
	 * Setup cpu_possible map as every cpu allocated to us, minus
	 * the results of any "disabled_cpus" settings.
	 */
	cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
	init_cpu_possible(&cpu_possible_init);

	/* Learn which cpus are valid for LOTAR caching. */
	rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
			      sizeof(cpu_lotar_map));
	if (rc < 0) {
		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
		cpu_lotar_map = cpu_possible_map;
	}

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Retrieve set of CPUs used for hash-for-home caching */
	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
			      (HV_VirtAddr) hash_for_home_map.bits,
			      sizeof(hash_for_home_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
	cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
#else
	cpu_cacheable_map = cpu_possible_map;
#endif
}
1195 | |||
1196 | |||
1197 | static int __init dataplane(char *str) | ||
1198 | { | ||
1199 | pr_warning("WARNING: dataplane support disabled in this kernel\n"); | ||
1200 | return 0; | ||
1201 | } | ||
1202 | |||
1203 | early_param("dataplane", dataplane); | ||
1204 | |||
#ifdef CONFIG_CMDLINE_BOOL
/* Command line baked into the kernel at build time via CONFIG_CMDLINE. */
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
1208 | |||
/*
 * Top-level architecture setup, called from start_kernel(): assemble
 * the kernel command line, check hypervisor compatibility, discover
 * cpus, and bring up memory, paging, clocks, and the initrd.
 */
void __init setup_arch(char **cmdline_p)
{
	int len;

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	/* The built-in command line completely replaces the dynamic one. */
	len = hv_get_command_line((HV_VirtAddr) boot_command_line,
				  COMMAND_LINE_SIZE);
	if (boot_command_line[0])
		pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
			   boot_command_line);
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	char *hv_cmdline;
#if defined(CONFIG_CMDLINE_BOOL)
	/* Prepend the built-in command line to the hypervisor's. */
	if (builtin_cmdline[0]) {
		int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
					  COMMAND_LINE_SIZE);
		if (builtin_len < COMMAND_LINE_SIZE-1)
			boot_command_line[builtin_len++] = ' ';
		hv_cmdline = &boot_command_line[builtin_len];
		len = COMMAND_LINE_SIZE - builtin_len;
	} else
#endif
	{
		hv_cmdline = boot_command_line;
		len = COMMAND_LINE_SIZE;
	}
	len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
	if (len < 0 || len > COMMAND_LINE_SIZE)
		early_panic("hv_get_command_line failed: %d\n", len);
#endif

	*cmdline_p = boot_command_line;

	/* Set disabled_map and setup_max_cpus very early */
	parse_early_param();

	/* Make sure the kernel is compatible with the hypervisor. */
	validate_hv();
	validate_va();

	setup_cpu_maps();


#ifdef CONFIG_PCI
	/*
	 * Initialize the PCI structures.  This is done before memory
	 * setup so that we know whether or not a pci_reserve region
	 * is necessary.
	 */
	if (tile_pci_init() == 0)
		pci_reserve_mb = 0;

	/* PCI systems reserve a region just below 4GB for mapping iomem. */
	pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
	pci_reserve_start_pfn = pci_reserve_end_pfn -
		(pci_reserve_mb << (20 - PAGE_SHIFT));
#endif

	/* Record the kernel image layout in the initial mm. */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	setup_memory();
	store_permanent_mappings();
	setup_bootmem_allocator();

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */

	paging_init();
	setup_numa_mapping();
	zone_sizes_init();
	set_page_homes();
	setup_cpu(1);
	setup_clock();
	load_hv_initrd();
}
1290 | |||
1291 | |||
/*
 * Set up per-cpu memory.
 */

/* Offset applied to per-cpu symbol addresses for each cpu. */
unsigned long __per_cpu_offset[NR_CPUS] __write_once;
EXPORT_SYMBOL(__per_cpu_offset);

/* Pages already handed out of each node's percpu region (boot only). */
static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 };
/* First pfn allocated to each cpu's percpu chunk; see pcpu_fc_alloc(). */
static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 };
1301 | |||
/*
 * As the percpu code allocates pages, we return the pages from the
 * end of the node for the specified cpu.
 */
static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	int nid = cpu_to_node(cpu);
	/* Next unused pfn in this cpu's node's percpu region. */
	unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];

	/* The first-chunk allocator only asks for whole pages. */
	BUG_ON(size % PAGE_SIZE != 0);
	pfn_offset[nid] += size / PAGE_SIZE;
	/* Remember the first pfn given to each cpu, for later rehoming. */
	if (percpu_pfn[cpu] == 0)
		percpu_pfn[cpu] = pfn;
	return pfn_to_kaddr(pfn);
}
1317 | |||
/*
 * Pages reserved for percpu memory are not freeable, and in any case we are
 * on a short path to panic() in setup_per_cpu_area() at this point anyway.
 */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
	/* Intentionally a no-op. */
}
1325 | |||
/*
 * Set up vmalloc page tables using bootmem for the percpu code.
 */
static void __init pcpu_fc_populate_pte(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(pgd_addr_invalid(addr));

	/* Walk the kernel page tables down to the pmd for this address. */
	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	if (pmd_present(*pmd)) {
		/* A huge page here would preclude installing a pte table. */
		BUG_ON(pmd_huge_page(*pmd));
	} else {
		/* Allocate and install a fresh L2 kernel page table. */
		pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
				      HV_PAGE_TABLE_ALIGN, 0);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}
}
1350 | |||
/*
 * Build the first percpu chunk, then rehome each cpu's copy of the
 * percpu data so it is cached on that cpu (rewriting both the vmalloc
 * mapping and the lowmem mapping), and finally install this cpu's
 * percpu offset and flush the TLB.
 */
void __init setup_per_cpu_areas(void)
{
	struct page *pg;
	unsigned long delta, pfn, lowmem_va;
	unsigned long size = percpu_size();
	char *ptr;
	int rc, cpu, i;

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
				   pcpu_fc_free, pcpu_fc_populate_pte);
	if (rc < 0)
		panic("Cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

		/* finv the copy out of cache so we can change homecache */
		ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
		__finv_buffer(ptr, size);
		pfn = percpu_pfn[cpu];

		/* Rewrite the page tables to cache on that cpu */
		pg = pfn_to_page(pfn);
		for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {

			/* Update the vmalloc mapping and page home. */
			pte_t *ptep =
				virt_to_pte(NULL, (unsigned long)ptr + i);
			pte_t pte = *ptep;
			BUG_ON(pfn != pte_pfn(pte));
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
			pte = set_remote_cache_cpu(pte, cpu);
			set_pte(ptep, pte);

			/* Update the lowmem mapping for consistency. */
			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
			ptep = virt_to_pte(NULL, lowmem_va);
			/* Shatter a huge lowmem mapping to get a pte. */
			if (pte_huge(*ptep)) {
				printk(KERN_DEBUG "early shatter of huge page"
				       " at %#lx\n", lowmem_va);
				shatter_pmd((pmd_t *)ptep);
				ptep = virt_to_pte(NULL, lowmem_va);
				BUG_ON(pte_huge(*ptep));
			}
			BUG_ON(pfn != pte_pfn(*ptep));
			set_pte(ptep, pte);
		}
	}

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);

	/* Make sure the finv's have completed. */
	mb_incoherent();

	/* Flush the TLB so we reference it properly from here on out. */
	local_flush_tlb_all();
}
1410 | |||
/* iomem resource describing the kernel data segment (for /proc/iomem). */
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

/* iomem resource describing the kernel text segment (for /proc/iomem). */
static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
1424 | |||
/*
 * We reserve all resources above 4GB so that PCI won't try to put
 * mappings above 4GB; the standard allows that for some devices but
 * the probing code truncates values to 32 bits.
 */
#ifdef CONFIG_PCI
/*
 * Claim all physical addresses at or above 4GB as "Non-Bus" so PCI
 * never assigns BARs up there.  Returns the inserted resource, or
 * NULL if allocation or insertion fails.
 */
static struct resource* __init
insert_non_bus_resource(void)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	/* BUG: original dereferenced res without checking for NULL. */
	if (!res)
		return NULL;
	res->name = "Non-Bus Physical Address Space";
	res->start = (1ULL << 32);
	res->end = -1LL;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}
#endif
1447 | |||
1448 | static struct resource* __init | ||
1449 | insert_ram_resource(u64 start_pfn, u64 end_pfn) | ||
1450 | { | ||
1451 | struct resource *res = | ||
1452 | kzalloc(sizeof(struct resource), GFP_ATOMIC); | ||
1453 | res->name = "System RAM"; | ||
1454 | res->start = start_pfn << PAGE_SHIFT; | ||
1455 | res->end = (end_pfn << PAGE_SHIFT) - 1; | ||
1456 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | ||
1457 | if (insert_resource(&iomem_resource, res)) { | ||
1458 | kfree(res); | ||
1459 | return NULL; | ||
1460 | } | ||
1461 | return res; | ||
1462 | } | ||
1463 | |||
1464 | /* | ||
1465 | * Request address space for all standard resources | ||
1466 | * | ||
1467 | * If the system includes PCI root complex drivers, we need to create | ||
1468 | * a window just below 4GB where PCI BARs can be mapped. | ||
1469 | */ | ||
static int __init request_standard_resources(void)
{
	int i;
	/* Kernel text is linked at MEM_SV_INTRPT, not PAGE_OFFSET. */
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

	iomem_resource.end = -1LL;
#ifdef CONFIG_PCI
	insert_non_bus_resource();
#endif

	/* Register each node's memory as System RAM. */
	for_each_online_node(i) {
		u64 start_pfn = node_start_pfn[i];
		u64 end_pfn = node_end_pfn[i];

#ifdef CONFIG_PCI
		/* Carve the PCI reserve window out of the RAM resources. */
		if (start_pfn <= pci_reserve_start_pfn &&
		    end_pfn > pci_reserve_start_pfn) {
			if (end_pfn > pci_reserve_end_pfn)
				insert_ram_resource(pci_reserve_end_pfn,
						    end_pfn);
			end_pfn = pci_reserve_start_pfn;
		}
#endif
		insert_ram_resource(start_pfn, end_pfn);
	}

	code_resource.start = __pa(_text - CODE_DELTA);
	code_resource.end = __pa(_etext - CODE_DELTA)-1;
	data_resource.start = __pa(_sdata);
	data_resource.end = __pa(_end)-1;

	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);

#ifdef CONFIG_KEXEC
	/* Crash kernel reservation, set up elsewhere. */
	insert_resource(&iomem_resource, &crashk_res);
#endif

	return 0;
}

subsys_initcall(request_standard_resources);
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c new file mode 100644 index 000000000000..45b66a3c991f --- /dev/null +++ b/arch/tile/kernel/signal.c | |||
@@ -0,0 +1,358 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/sched.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/wait.h> | ||
24 | #include <linux/unistd.h> | ||
25 | #include <linux/stddef.h> | ||
26 | #include <linux/personality.h> | ||
27 | #include <linux/suspend.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/elf.h> | ||
30 | #include <linux/compat.h> | ||
31 | #include <linux/syscalls.h> | ||
32 | #include <linux/uaccess.h> | ||
33 | #include <asm/processor.h> | ||
34 | #include <asm/ucontext.h> | ||
35 | #include <asm/sigframe.h> | ||
36 | #include <asm/syscalls.h> | ||
37 | #include <arch/interrupts.h> | ||
38 | |||
39 | #define DEBUG_SIG 0 | ||
40 | |||
41 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
42 | |||
43 | |||
/* sigaltstack() wrapper: needs regs to supply the current user sp. */
long _sys_sigaltstack(const stack_t __user *uss,
		      stack_t __user *uoss, struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->sp);
}
49 | |||
50 | |||
51 | /* | ||
52 | * Do a signal return; undo the signal stack. | ||
53 | */ | ||
54 | |||
/*
 * Restore the user register state saved in a sigcontext back into
 * 'regs', and return the interrupted code's r0 through *pr0.
 * Returns nonzero if any user-space read faulted.
 */
int restore_sigcontext(struct pt_regs *regs,
		       struct sigcontext __user *sc, long *pr0)
{
	int err = 0;
	int i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/* Copy the saved register state back word-by-word into pt_regs. */
	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
		err |= __get_user(((long *)regs)[i],
				  &((long __user *)(&sc->regs))[i]);

	/* Mark this trap as a sigreturn, not an ordinary syscall. */
	regs->faultnum = INT_SWINT_1_SIGRETURN;

	err |= __get_user(*pr0, &sc->regs.regs[0]);
	return err;
}
73 | |||
/* sigreturn() returns long since it restores r0 in the interrupted code. */
long _sys_rt_sigreturn(struct pt_regs *regs)
{
	/* The signal frame was pushed at the interrupted stack pointer. */
	struct rt_sigframe __user *frame =
		(struct rt_sigframe __user *)(regs->sp);
	sigset_t set;
	long r0;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	/* Reinstate the signal mask saved when the handler was set up. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
		goto badframe;

	if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT)
		goto badframe;

	/* Hand back the interrupted code's original r0. */
	return r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
105 | |||
106 | /* | ||
107 | * Set up a signal frame. | ||
108 | */ | ||
109 | |||
/*
 * Copy the live pt_regs word-by-word into the user-space sigcontext.
 * Returns nonzero if any user-space write faulted.
 */
int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
{
	int i, err = 0;

	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
		err |= __put_user(((long *)regs)[i],
				  &((long __user *)(&sc->regs))[i]);

	return err;
}
120 | |||
121 | /* | ||
122 | * Determine which stack to use.. | ||
123 | */ | ||
124 | static inline void __user *get_sigframe(struct k_sigaction *ka, | ||
125 | struct pt_regs *regs, | ||
126 | size_t frame_size) | ||
127 | { | ||
128 | unsigned long sp; | ||
129 | |||
130 | /* Default to using normal stack */ | ||
131 | sp = regs->sp; | ||
132 | |||
133 | /* | ||
134 | * If we are on the alternate signal stack and would overflow | ||
135 | * it, don't. Return an always-bogus address instead so we | ||
136 | * will die with SIGSEGV. | ||
137 | */ | ||
138 | if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) | ||
139 | return (void __user __force *)-1UL; | ||
140 | |||
141 | /* This is the X/Open sanctioned signal stack switching. */ | ||
142 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
143 | if (sas_ss_flags(sp) == 0) | ||
144 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
145 | } | ||
146 | |||
147 | sp -= frame_size; | ||
148 | /* | ||
149 | * Align the stack pointer according to the TILE ABI, | ||
150 | * i.e. so that on function entry (sp & 15) == 0. | ||
151 | */ | ||
152 | sp &= -16UL; | ||
153 | return (void __user *) sp; | ||
154 | } | ||
155 | |||
/*
 * Build the rt signal frame on the user stack and redirect user-space
 * execution to the handler.  On any user-space fault the task gets a
 * forced SIGSEGV and -EFAULT is returned; 0 on success.
 */
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs)
{
	unsigned long restorer;
	struct rt_sigframe __user *frame;
	int err = 0;
	int usig;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto give_sigsegv;

	/* Translate the signal number through the exec domain, if any. */
	usig = current_thread_info()->exec_domain
		&& current_thread_info()->exec_domain->signal_invmap
		&& sig < 32
		? current_thread_info()->exec_domain->signal_invmap[sig]
		: sig;

	/* Always write at least the signal number for the stack backtracer. */
	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* At sigreturn time, restore the callee-save registers too. */
		err |= copy_siginfo_to_user(&frame->info, info);
		regs->flags |= PT_FLAGS_RESTORE_REGS;
	} else {
		err |= __put_user(info->si_signo, &frame->info.si_signo);
	}

	/* Create the ucontext. */
	err |= __clear_user(&frame->save_area, sizeof(frame->save_area));
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __put_user((void __user *)(current->sas_ss_sp),
			  &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= setup_sigcontext(&frame->uc.uc_mcontext, regs);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/* Return through the vDSO unless the caller supplied a restorer. */
	restorer = VDSO_BASE;
	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = (unsigned long) ka->sa.sa_restorer;

	/*
	 * Set up registers for signal handler.
	 * Registers that we don't modify keep the value they had from
	 * user-space at the time we took the signal.
	 */
	regs->pc = (unsigned long) ka->sa.sa_handler;
	regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */
	regs->sp = (unsigned long) frame;
	regs->lr = restorer;
	regs->regs[0] = (unsigned long) usig;

	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* Need extra arguments, so mark to restore caller-saves. */
		regs->regs[1] = (unsigned long) &frame->info;
		regs->regs[2] = (unsigned long) &frame->uc;
		regs->flags |= PT_FLAGS_CALLER_SAVES;
	}

	/*
	 * Notify any tracer that was single-stepping it.
	 * The tracer may want to single-step inside the
	 * handler too.
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		ptrace_notify(SIGTRAP);

	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
234 | |||
/*
 * OK, we're invoking a handler: fix up any interrupted system call
 * for restart semantics, build the signal frame, and update the
 * blocked-signal mask.  Returns 0 on success.
 */

static int handle_signal(unsigned long sig, siginfo_t *info,
			 struct k_sigaction *ka, sigset_t *oldset,
			 struct pt_regs *regs)
{
	int ret;


	/* Are we from a system call? */
	if (regs->faultnum == INT_SWINT_1) {
		/* If so, check system call restarting.. */
		switch (regs->regs[0]) {
		case -ERESTART_RESTARTBLOCK:
		case -ERESTARTNOHAND:
			/* These never restart across a handler. */
			regs->regs[0] = -EINTR;
			break;

		case -ERESTARTSYS:
			/* Restart only if the handler asked for it. */
			if (!(ka->sa.sa_flags & SA_RESTART)) {
				regs->regs[0] = -EINTR;
				break;
			}
			/* fallthrough */
		case -ERESTARTNOINTR:
			/* Reload caller-saves to restore r0..r5 and r10. */
			regs->flags |= PT_FLAGS_CALLER_SAVES;
			regs->regs[0] = regs->orig_r0;
			/* Back up the pc so the syscall is re-issued. */
			regs->pc -= 8;
		}
	}

	/* Set up the stack frame */
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		ret = compat_setup_rt_frame(sig, ka, info, oldset, regs);
	else
#endif
		ret = setup_rt_frame(sig, ka, info, oldset, regs);
	if (ret == 0) {
		/* This code is only called from system calls or from
		 * the work_pending path in the return-to-user code, and
		 * either way we can re-enable interrupts unconditionally.
		 */
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,
			  &current->blocked, &ka->sa.sa_mask);
		if (!(ka->sa.sa_flags & SA_NODEFER))
			sigaddset(&current->blocked, sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	return ret;
}
292 | |||
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
void do_signal(struct pt_regs *regs)
{
	siginfo_t info;
	int signr;
	struct k_sigaction ka;
	sigset_t *oldset;

	/*
	 * i386 will check if we're coming from kernel mode and bail out
	 * here.  In my experience this just turns weird crashes into
	 * weird spin-hangs.  But if we find a case where this seems
	 * helpful, we can reinstate the check on "!user_mode(regs)".
	 */

	/* Use the saved sigmask if sigsuspend() et al. stashed one. */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK)
		oldset = &current->saved_sigmask;
	else
		oldset = &current->blocked;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
	if (signr > 0) {
		/* Whee!  Actually deliver the signal.  */
		if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
			/*
			 * A signal was successfully delivered; the saved
			 * sigmask will have been stored in the signal frame,
			 * and will be restored by sigreturn, so we can simply
			 * clear the TS_RESTORE_SIGMASK flag.
			 */
			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		}

		return;
	}

	/* Did we come from a system call? */
	if (regs->faultnum == INT_SWINT_1) {
		/* Restart the system call - no handlers present */
		switch (regs->regs[0]) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			/* Restore syscall args and back up to re-issue it. */
			regs->flags |= PT_FLAGS_CALLER_SAVES;
			regs->regs[0] = regs->orig_r0;
			regs->pc -= 8;
			break;

		case -ERESTART_RESTARTBLOCK:
			/* Re-enter via the restart_syscall() mechanism. */
			regs->flags |= PT_FLAGS_CALLER_SAVES;
			regs->regs[TREG_SYSCALL_NR] = __NR_restart_syscall;
			regs->pc -= 8;
			break;
		}
	}

	/* If there's no signal to deliver, just put the saved sigmask back. */
	if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}
}
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c new file mode 100644 index 000000000000..5ec4b9c651f2 --- /dev/null +++ b/arch/tile/kernel/single_step.c | |||
@@ -0,0 +1,663 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * A code-rewriter that enables instruction single-stepping. | ||
15 | * Derived from iLib's single-stepping code. | ||
16 | */ | ||
17 | |||
18 | #ifndef __tilegx__ /* No support for single-step yet. */ | ||
19 | |||
20 | /* These functions are only used on the TILE platform */ | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/thread_info.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/mman.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <asm/cacheflush.h> | ||
28 | #include <asm/opcode-tile.h> | ||
29 | #include <asm/opcode_constants.h> | ||
30 | #include <arch/abi.h> | ||
31 | |||
32 | #define signExtend17(val) sign_extend((val), 17) | ||
33 | #define TILE_X1_MASK (0xffffffffULL << 31) | ||
34 | |||
35 | int unaligned_printk; | ||
36 | |||
37 | static int __init setup_unaligned_printk(char *str) | ||
38 | { | ||
39 | long val; | ||
40 | if (strict_strtol(str, 0, &val) != 0) | ||
41 | return 0; | ||
42 | unaligned_printk = val; | ||
43 | pr_info("Printk for each unaligned data accesses is %s\n", | ||
44 | unaligned_printk ? "enabled" : "disabled"); | ||
45 | return 1; | ||
46 | } | ||
47 | __setup("unaligned_printk=", setup_unaligned_printk); | ||
48 | |||
49 | unsigned int unaligned_fixup_count; | ||
50 | |||
51 | enum mem_op { | ||
52 | MEMOP_NONE, | ||
53 | MEMOP_LOAD, | ||
54 | MEMOP_STORE, | ||
55 | MEMOP_LOAD_POSTINCR, | ||
56 | MEMOP_STORE_POSTINCR | ||
57 | }; | ||
58 | |||
59 | static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, int32_t offset) | ||
60 | { | ||
61 | tile_bundle_bits result; | ||
62 | |||
63 | /* mask out the old offset */ | ||
64 | tile_bundle_bits mask = create_BrOff_X1(-1); | ||
65 | result = n & (~mask); | ||
66 | |||
67 | /* or in the new offset */ | ||
68 | result |= create_BrOff_X1(offset); | ||
69 | |||
70 | return result; | ||
71 | } | ||
72 | |||
73 | static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src) | ||
74 | { | ||
75 | tile_bundle_bits result; | ||
76 | tile_bundle_bits op; | ||
77 | |||
78 | result = n & (~TILE_X1_MASK); | ||
79 | |||
80 | op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) | | ||
81 | create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) | | ||
82 | create_Dest_X1(dest) | | ||
83 | create_SrcB_X1(TREG_ZERO) | | ||
84 | create_SrcA_X1(src) ; | ||
85 | |||
86 | result |= op; | ||
87 | return result; | ||
88 | } | ||
89 | |||
90 | static inline tile_bundle_bits nop_X1(tile_bundle_bits n) | ||
91 | { | ||
92 | return move_X1(n, TREG_ZERO, TREG_ZERO); | ||
93 | } | ||
94 | |||
95 | static inline tile_bundle_bits addi_X1( | ||
96 | tile_bundle_bits n, int dest, int src, int imm) | ||
97 | { | ||
98 | n &= ~TILE_X1_MASK; | ||
99 | |||
100 | n |= (create_SrcA_X1(src) | | ||
101 | create_Dest_X1(dest) | | ||
102 | create_Imm8_X1(imm) | | ||
103 | create_S_X1(0) | | ||
104 | create_Opcode_X1(IMM_0_OPCODE_X1) | | ||
105 | create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1)); | ||
106 | |||
107 | return n; | ||
108 | } | ||
109 | |||
110 | static tile_bundle_bits rewrite_load_store_unaligned( | ||
111 | struct single_step_state *state, | ||
112 | tile_bundle_bits bundle, | ||
113 | struct pt_regs *regs, | ||
114 | enum mem_op mem_op, | ||
115 | int size, int sign_ext) | ||
116 | { | ||
117 | unsigned char __user *addr; | ||
118 | int val_reg, addr_reg, err, val; | ||
119 | |||
120 | /* Get address and value registers */ | ||
121 | if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) { | ||
122 | addr_reg = get_SrcA_Y2(bundle); | ||
123 | val_reg = get_SrcBDest_Y2(bundle); | ||
124 | } else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) { | ||
125 | addr_reg = get_SrcA_X1(bundle); | ||
126 | val_reg = get_Dest_X1(bundle); | ||
127 | } else { | ||
128 | addr_reg = get_SrcA_X1(bundle); | ||
129 | val_reg = get_SrcB_X1(bundle); | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * If registers are not GPRs, don't try to handle it. | ||
134 | * | ||
135 | * FIXME: we could handle non-GPR loads by getting the real value | ||
136 | * from memory, writing it to the single step buffer, using a | ||
137 | * temp_reg to hold a pointer to that memory, then executing that | ||
138 | * instruction and resetting temp_reg. For non-GPR stores, it's a | ||
139 | * little trickier; we could use the single step buffer for that | ||
140 | * too, but we'd have to add some more state bits so that we could | ||
141 | * call back in here to copy that value to the real target. For | ||
142 | * now, we just handle the simple case. | ||
143 | */ | ||
144 | if ((val_reg >= PTREGS_NR_GPRS && | ||
145 | (val_reg != TREG_ZERO || | ||
146 | mem_op == MEMOP_LOAD || | ||
147 | mem_op == MEMOP_LOAD_POSTINCR)) || | ||
148 | addr_reg >= PTREGS_NR_GPRS) | ||
149 | return bundle; | ||
150 | |||
151 | /* If it's aligned, don't handle it specially */ | ||
152 | addr = (void __user *)regs->regs[addr_reg]; | ||
153 | if (((unsigned long)addr % size) == 0) | ||
154 | return bundle; | ||
155 | |||
156 | #ifndef __LITTLE_ENDIAN | ||
157 | # error We assume little-endian representation with copy_xx_user size 2 here | ||
158 | #endif | ||
159 | /* Handle unaligned load/store */ | ||
160 | if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) { | ||
161 | unsigned short val_16; | ||
162 | switch (size) { | ||
163 | case 2: | ||
164 | err = copy_from_user(&val_16, addr, sizeof(val_16)); | ||
165 | val = sign_ext ? ((short)val_16) : val_16; | ||
166 | break; | ||
167 | case 4: | ||
168 | err = copy_from_user(&val, addr, sizeof(val)); | ||
169 | break; | ||
170 | default: | ||
171 | BUG(); | ||
172 | } | ||
173 | if (err == 0) { | ||
174 | state->update_reg = val_reg; | ||
175 | state->update_value = val; | ||
176 | state->update = 1; | ||
177 | } | ||
178 | } else { | ||
179 | val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg]; | ||
180 | err = copy_to_user(addr, &val, size); | ||
181 | } | ||
182 | |||
183 | if (err) { | ||
184 | siginfo_t info = { | ||
185 | .si_signo = SIGSEGV, | ||
186 | .si_code = SEGV_MAPERR, | ||
187 | .si_addr = addr | ||
188 | }; | ||
189 | force_sig_info(info.si_signo, &info, current); | ||
190 | return (tile_bundle_bits) 0; | ||
191 | } | ||
192 | |||
193 | if (unaligned_fixup == 0) { | ||
194 | siginfo_t info = { | ||
195 | .si_signo = SIGBUS, | ||
196 | .si_code = BUS_ADRALN, | ||
197 | .si_addr = addr | ||
198 | }; | ||
199 | force_sig_info(info.si_signo, &info, current); | ||
200 | return (tile_bundle_bits) 0; | ||
201 | } | ||
202 | |||
203 | if (unaligned_printk || unaligned_fixup_count == 0) { | ||
204 | pr_info("Process %d/%s: PC %#lx: Fixup of" | ||
205 | " unaligned %s at %#lx.\n", | ||
206 | current->pid, current->comm, regs->pc, | ||
207 | (mem_op == MEMOP_LOAD || | ||
208 | mem_op == MEMOP_LOAD_POSTINCR) ? | ||
209 | "load" : "store", | ||
210 | (unsigned long)addr); | ||
211 | if (!unaligned_printk) { | ||
212 | #define P pr_info | ||
213 | P("\n"); | ||
214 | P("Unaligned fixups in the kernel will slow your application considerably.\n"); | ||
215 | P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n"); | ||
216 | P("which requests the kernel show all unaligned fixups, or write a \"0\"\n"); | ||
217 | P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n"); | ||
218 | P("access will become a SIGBUS you can debug. No further warnings will be\n"); | ||
219 | P("shown so as to avoid additional slowdown, but you can track the number\n"); | ||
220 | P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n"); | ||
221 | P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n"); | ||
222 | P("\n"); | ||
223 | #undef P | ||
224 | } | ||
225 | } | ||
226 | ++unaligned_fixup_count; | ||
227 | |||
228 | if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) { | ||
229 | /* Convert the Y2 instruction to a prefetch. */ | ||
230 | bundle &= ~(create_SrcBDest_Y2(-1) | | ||
231 | create_Opcode_Y2(-1)); | ||
232 | bundle |= (create_SrcBDest_Y2(TREG_ZERO) | | ||
233 | create_Opcode_Y2(LW_OPCODE_Y2)); | ||
234 | /* Replace the load postincr with an addi */ | ||
235 | } else if (mem_op == MEMOP_LOAD_POSTINCR) { | ||
236 | bundle = addi_X1(bundle, addr_reg, addr_reg, | ||
237 | get_Imm8_X1(bundle)); | ||
238 | /* Replace the store postincr with an addi */ | ||
239 | } else if (mem_op == MEMOP_STORE_POSTINCR) { | ||
240 | bundle = addi_X1(bundle, addr_reg, addr_reg, | ||
241 | get_Dest_Imm8_X1(bundle)); | ||
242 | } else { | ||
243 | /* Convert the X1 instruction to a nop. */ | ||
244 | bundle &= ~(create_Opcode_X1(-1) | | ||
245 | create_UnShOpcodeExtension_X1(-1) | | ||
246 | create_UnOpcodeExtension_X1(-1)); | ||
247 | bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) | | ||
248 | create_UnShOpcodeExtension_X1( | ||
249 | UN_0_SHUN_0_OPCODE_X1) | | ||
250 | create_UnOpcodeExtension_X1( | ||
251 | NOP_UN_0_SHUN_0_OPCODE_X1)); | ||
252 | } | ||
253 | |||
254 | return bundle; | ||
255 | } | ||
256 | |||
257 | /** | ||
258 | * single_step_once() - entry point when single stepping has been triggered. | ||
259 | * @regs: The machine register state | ||
260 | * | ||
261 | * When we arrive at this routine via a trampoline, the single step | ||
262 | * engine copies the executing bundle to the single step buffer. | ||
263 | * If the instruction is a condition branch, then the target is | ||
264 | * reset to one past the next instruction. If the instruction | ||
265 | * sets the lr, then that is noted. If the instruction is a jump | ||
266 | * or call, then the new target pc is preserved and the current | ||
267 | * bundle instruction set to null. | ||
268 | * | ||
269 | * The necessary post-single-step rewriting information is stored in | ||
270 | * single_step_state-> We use data segment values because the | ||
271 | * stack will be rewound when we run the rewritten single-stepped | ||
272 | * instruction. | ||
273 | */ | ||
274 | void single_step_once(struct pt_regs *regs) | ||
275 | { | ||
276 | extern tile_bundle_bits __single_step_ill_insn; | ||
277 | extern tile_bundle_bits __single_step_j_insn; | ||
278 | extern tile_bundle_bits __single_step_addli_insn; | ||
279 | extern tile_bundle_bits __single_step_auli_insn; | ||
280 | struct thread_info *info = (void *)current_thread_info(); | ||
281 | struct single_step_state *state = info->step_state; | ||
282 | int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP); | ||
283 | tile_bundle_bits __user *buffer, *pc; | ||
284 | tile_bundle_bits bundle; | ||
285 | int temp_reg; | ||
286 | int target_reg = TREG_LR; | ||
287 | int err; | ||
288 | enum mem_op mem_op = MEMOP_NONE; | ||
289 | int size = 0, sign_ext = 0; /* happy compiler */ | ||
290 | |||
291 | asm( | ||
292 | " .pushsection .rodata.single_step\n" | ||
293 | " .align 8\n" | ||
294 | " .globl __single_step_ill_insn\n" | ||
295 | "__single_step_ill_insn:\n" | ||
296 | " ill\n" | ||
297 | " .globl __single_step_addli_insn\n" | ||
298 | "__single_step_addli_insn:\n" | ||
299 | " { nop; addli r0, zero, 0 }\n" | ||
300 | " .globl __single_step_auli_insn\n" | ||
301 | "__single_step_auli_insn:\n" | ||
302 | " { nop; auli r0, r0, 0 }\n" | ||
303 | " .globl __single_step_j_insn\n" | ||
304 | "__single_step_j_insn:\n" | ||
305 | " j .\n" | ||
306 | " .popsection\n" | ||
307 | ); | ||
308 | |||
309 | if (state == NULL) { | ||
310 | /* allocate a page of writable, executable memory */ | ||
311 | state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL); | ||
312 | if (state == NULL) { | ||
313 | pr_err("Out of kernel memory trying to single-step\n"); | ||
314 | return; | ||
315 | } | ||
316 | |||
317 | /* allocate a cache line of writable, executable memory */ | ||
318 | down_write(¤t->mm->mmap_sem); | ||
319 | buffer = (void __user *) do_mmap(NULL, 0, 64, | ||
320 | PROT_EXEC | PROT_READ | PROT_WRITE, | ||
321 | MAP_PRIVATE | MAP_ANONYMOUS, | ||
322 | 0); | ||
323 | up_write(¤t->mm->mmap_sem); | ||
324 | |||
325 | if (IS_ERR((void __force *)buffer)) { | ||
326 | kfree(state); | ||
327 | pr_err("Out of kernel pages trying to single-step\n"); | ||
328 | return; | ||
329 | } | ||
330 | |||
331 | state->buffer = buffer; | ||
332 | state->is_enabled = 0; | ||
333 | |||
334 | info->step_state = state; | ||
335 | |||
336 | /* Validate our stored instruction patterns */ | ||
337 | BUG_ON(get_Opcode_X1(__single_step_addli_insn) != | ||
338 | ADDLI_OPCODE_X1); | ||
339 | BUG_ON(get_Opcode_X1(__single_step_auli_insn) != | ||
340 | AULI_OPCODE_X1); | ||
341 | BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO); | ||
342 | BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0); | ||
343 | BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0); | ||
344 | } | ||
345 | |||
346 | /* | ||
347 | * If we are returning from a syscall, we still haven't hit the | ||
348 | * "ill" for the swint1 instruction. So back the PC up to be | ||
349 | * pointing at the swint1, but we'll actually return directly | ||
350 | * back to the "ill" so we come back in via SIGILL as if we | ||
351 | * had "executed" the swint1 without ever being in kernel space. | ||
352 | */ | ||
353 | if (regs->faultnum == INT_SWINT_1) | ||
354 | regs->pc -= 8; | ||
355 | |||
356 | pc = (tile_bundle_bits __user *)(regs->pc); | ||
357 | if (get_user(bundle, pc) != 0) { | ||
358 | pr_err("Couldn't read instruction at %p trying to step\n", pc); | ||
359 | return; | ||
360 | } | ||
361 | |||
362 | /* We'll follow the instruction with 2 ill op bundles */ | ||
363 | state->orig_pc = (unsigned long)pc; | ||
364 | state->next_pc = (unsigned long)(pc + 1); | ||
365 | state->branch_next_pc = 0; | ||
366 | state->update = 0; | ||
367 | |||
368 | if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) { | ||
369 | /* two wide, check for control flow */ | ||
370 | int opcode = get_Opcode_X1(bundle); | ||
371 | |||
372 | switch (opcode) { | ||
373 | /* branches */ | ||
374 | case BRANCH_OPCODE_X1: | ||
375 | { | ||
376 | int32_t offset = signExtend17(get_BrOff_X1(bundle)); | ||
377 | |||
378 | /* | ||
379 | * For branches, we use a rewriting trick to let the | ||
380 | * hardware evaluate whether the branch is taken or | ||
381 | * untaken. We record the target offset and then | ||
382 | * rewrite the branch instruction to target 1 insn | ||
383 | * ahead if the branch is taken. We then follow the | ||
384 | * rewritten branch with two bundles, each containing | ||
385 | * an "ill" instruction. The supervisor examines the | ||
386 | * pc after the single step code is executed, and if | ||
387 | * the pc is the first ill instruction, then the | ||
388 | * branch (if any) was not taken. If the pc is the | ||
389 | * second ill instruction, then the branch was | ||
390 | * taken. The new pc is computed for these cases, and | ||
391 | * inserted into the registers for the thread. If | ||
392 | * the pc is the start of the single step code, then | ||
393 | * an exception or interrupt was taken before the | ||
394 | * code started processing, and the same "original" | ||
395 | * pc is restored. This change, different from the | ||
396 | * original implementation, has the advantage of | ||
397 | * executing a single user instruction. | ||
398 | */ | ||
399 | state->branch_next_pc = (unsigned long)(pc + offset); | ||
400 | |||
401 | /* rewrite branch offset to go forward one bundle */ | ||
402 | bundle = set_BrOff_X1(bundle, 2); | ||
403 | } | ||
404 | break; | ||
405 | |||
406 | /* jumps */ | ||
407 | case JALB_OPCODE_X1: | ||
408 | case JALF_OPCODE_X1: | ||
409 | state->update = 1; | ||
410 | state->next_pc = | ||
411 | (unsigned long) (pc + get_JOffLong_X1(bundle)); | ||
412 | break; | ||
413 | |||
414 | case JB_OPCODE_X1: | ||
415 | case JF_OPCODE_X1: | ||
416 | state->next_pc = | ||
417 | (unsigned long) (pc + get_JOffLong_X1(bundle)); | ||
418 | bundle = nop_X1(bundle); | ||
419 | break; | ||
420 | |||
421 | case SPECIAL_0_OPCODE_X1: | ||
422 | switch (get_RRROpcodeExtension_X1(bundle)) { | ||
423 | /* jump-register */ | ||
424 | case JALRP_SPECIAL_0_OPCODE_X1: | ||
425 | case JALR_SPECIAL_0_OPCODE_X1: | ||
426 | state->update = 1; | ||
427 | state->next_pc = | ||
428 | regs->regs[get_SrcA_X1(bundle)]; | ||
429 | break; | ||
430 | |||
431 | case JRP_SPECIAL_0_OPCODE_X1: | ||
432 | case JR_SPECIAL_0_OPCODE_X1: | ||
433 | state->next_pc = | ||
434 | regs->regs[get_SrcA_X1(bundle)]; | ||
435 | bundle = nop_X1(bundle); | ||
436 | break; | ||
437 | |||
438 | case LNK_SPECIAL_0_OPCODE_X1: | ||
439 | state->update = 1; | ||
440 | target_reg = get_Dest_X1(bundle); | ||
441 | break; | ||
442 | |||
443 | /* stores */ | ||
444 | case SH_SPECIAL_0_OPCODE_X1: | ||
445 | mem_op = MEMOP_STORE; | ||
446 | size = 2; | ||
447 | break; | ||
448 | |||
449 | case SW_SPECIAL_0_OPCODE_X1: | ||
450 | mem_op = MEMOP_STORE; | ||
451 | size = 4; | ||
452 | break; | ||
453 | } | ||
454 | break; | ||
455 | |||
456 | /* loads and iret */ | ||
457 | case SHUN_0_OPCODE_X1: | ||
458 | if (get_UnShOpcodeExtension_X1(bundle) == | ||
459 | UN_0_SHUN_0_OPCODE_X1) { | ||
460 | switch (get_UnOpcodeExtension_X1(bundle)) { | ||
461 | case LH_UN_0_SHUN_0_OPCODE_X1: | ||
462 | mem_op = MEMOP_LOAD; | ||
463 | size = 2; | ||
464 | sign_ext = 1; | ||
465 | break; | ||
466 | |||
467 | case LH_U_UN_0_SHUN_0_OPCODE_X1: | ||
468 | mem_op = MEMOP_LOAD; | ||
469 | size = 2; | ||
470 | sign_ext = 0; | ||
471 | break; | ||
472 | |||
473 | case LW_UN_0_SHUN_0_OPCODE_X1: | ||
474 | mem_op = MEMOP_LOAD; | ||
475 | size = 4; | ||
476 | break; | ||
477 | |||
478 | case IRET_UN_0_SHUN_0_OPCODE_X1: | ||
479 | { | ||
480 | unsigned long ex0_0 = __insn_mfspr( | ||
481 | SPR_EX_CONTEXT_0_0); | ||
482 | unsigned long ex0_1 = __insn_mfspr( | ||
483 | SPR_EX_CONTEXT_0_1); | ||
484 | /* | ||
485 | * Special-case it if we're iret'ing | ||
486 | * to PL0 again. Otherwise just let | ||
487 | * it run and it will generate SIGILL. | ||
488 | */ | ||
489 | if (EX1_PL(ex0_1) == USER_PL) { | ||
490 | state->next_pc = ex0_0; | ||
491 | regs->ex1 = ex0_1; | ||
492 | bundle = nop_X1(bundle); | ||
493 | } | ||
494 | } | ||
495 | } | ||
496 | } | ||
497 | break; | ||
498 | |||
499 | #if CHIP_HAS_WH64() | ||
500 | /* postincrement operations */ | ||
501 | case IMM_0_OPCODE_X1: | ||
502 | switch (get_ImmOpcodeExtension_X1(bundle)) { | ||
503 | case LWADD_IMM_0_OPCODE_X1: | ||
504 | mem_op = MEMOP_LOAD_POSTINCR; | ||
505 | size = 4; | ||
506 | break; | ||
507 | |||
508 | case LHADD_IMM_0_OPCODE_X1: | ||
509 | mem_op = MEMOP_LOAD_POSTINCR; | ||
510 | size = 2; | ||
511 | sign_ext = 1; | ||
512 | break; | ||
513 | |||
514 | case LHADD_U_IMM_0_OPCODE_X1: | ||
515 | mem_op = MEMOP_LOAD_POSTINCR; | ||
516 | size = 2; | ||
517 | sign_ext = 0; | ||
518 | break; | ||
519 | |||
520 | case SWADD_IMM_0_OPCODE_X1: | ||
521 | mem_op = MEMOP_STORE_POSTINCR; | ||
522 | size = 4; | ||
523 | break; | ||
524 | |||
525 | case SHADD_IMM_0_OPCODE_X1: | ||
526 | mem_op = MEMOP_STORE_POSTINCR; | ||
527 | size = 2; | ||
528 | break; | ||
529 | |||
530 | default: | ||
531 | break; | ||
532 | } | ||
533 | break; | ||
534 | #endif /* CHIP_HAS_WH64() */ | ||
535 | } | ||
536 | |||
537 | if (state->update) { | ||
538 | /* | ||
539 | * Get an available register. We start with a | ||
540 | * bitmask with 1's for available registers. | ||
541 | * We truncate to the low 32 registers since | ||
542 | * we are guaranteed to have set bits in the | ||
543 | * low 32 bits, then use ctz to pick the first. | ||
544 | */ | ||
545 | u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) | | ||
546 | (1ULL << get_SrcA_X0(bundle)) | | ||
547 | (1ULL << get_SrcB_X0(bundle)) | | ||
548 | (1ULL << target_reg)); | ||
549 | temp_reg = __builtin_ctz(mask); | ||
550 | state->update_reg = temp_reg; | ||
551 | state->update_value = regs->regs[temp_reg]; | ||
552 | regs->regs[temp_reg] = (unsigned long) (pc+1); | ||
553 | regs->flags |= PT_FLAGS_RESTORE_REGS; | ||
554 | bundle = move_X1(bundle, target_reg, temp_reg); | ||
555 | } | ||
556 | } else { | ||
557 | int opcode = get_Opcode_Y2(bundle); | ||
558 | |||
559 | switch (opcode) { | ||
560 | /* loads */ | ||
561 | case LH_OPCODE_Y2: | ||
562 | mem_op = MEMOP_LOAD; | ||
563 | size = 2; | ||
564 | sign_ext = 1; | ||
565 | break; | ||
566 | |||
567 | case LH_U_OPCODE_Y2: | ||
568 | mem_op = MEMOP_LOAD; | ||
569 | size = 2; | ||
570 | sign_ext = 0; | ||
571 | break; | ||
572 | |||
573 | case LW_OPCODE_Y2: | ||
574 | mem_op = MEMOP_LOAD; | ||
575 | size = 4; | ||
576 | break; | ||
577 | |||
578 | /* stores */ | ||
579 | case SH_OPCODE_Y2: | ||
580 | mem_op = MEMOP_STORE; | ||
581 | size = 2; | ||
582 | break; | ||
583 | |||
584 | case SW_OPCODE_Y2: | ||
585 | mem_op = MEMOP_STORE; | ||
586 | size = 4; | ||
587 | break; | ||
588 | } | ||
589 | } | ||
590 | |||
591 | /* | ||
592 | * Check if we need to rewrite an unaligned load/store. | ||
593 | * Returning zero is a special value meaning we need to SIGSEGV. | ||
594 | */ | ||
595 | if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) { | ||
596 | bundle = rewrite_load_store_unaligned(state, bundle, regs, | ||
597 | mem_op, size, sign_ext); | ||
598 | if (bundle == 0) | ||
599 | return; | ||
600 | } | ||
601 | |||
602 | /* write the bundle to our execution area */ | ||
603 | buffer = state->buffer; | ||
604 | err = __put_user(bundle, buffer++); | ||
605 | |||
606 | /* | ||
607 | * If we're really single-stepping, we take an INT_ILL after. | ||
608 | * If we're just handling an unaligned access, we can just | ||
609 | * jump directly back to where we were in user code. | ||
610 | */ | ||
611 | if (is_single_step) { | ||
612 | err |= __put_user(__single_step_ill_insn, buffer++); | ||
613 | err |= __put_user(__single_step_ill_insn, buffer++); | ||
614 | } else { | ||
615 | long delta; | ||
616 | |||
617 | if (state->update) { | ||
618 | /* We have some state to update; do it inline */ | ||
619 | int ha16; | ||
620 | bundle = __single_step_addli_insn; | ||
621 | bundle |= create_Dest_X1(state->update_reg); | ||
622 | bundle |= create_Imm16_X1(state->update_value); | ||
623 | err |= __put_user(bundle, buffer++); | ||
624 | bundle = __single_step_auli_insn; | ||
625 | bundle |= create_Dest_X1(state->update_reg); | ||
626 | bundle |= create_SrcA_X1(state->update_reg); | ||
627 | ha16 = (state->update_value + 0x8000) >> 16; | ||
628 | bundle |= create_Imm16_X1(ha16); | ||
629 | err |= __put_user(bundle, buffer++); | ||
630 | state->update = 0; | ||
631 | } | ||
632 | |||
633 | /* End with a jump back to the next instruction */ | ||
634 | delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) - | ||
635 | (unsigned long)buffer) >> | ||
636 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES; | ||
637 | bundle = __single_step_j_insn; | ||
638 | bundle |= create_JOffLong_X1(delta); | ||
639 | err |= __put_user(bundle, buffer++); | ||
640 | } | ||
641 | |||
642 | if (err) { | ||
643 | pr_err("Fault when writing to single-step buffer\n"); | ||
644 | return; | ||
645 | } | ||
646 | |||
647 | /* | ||
648 | * Flush the buffer. | ||
649 | * We do a local flush only, since this is a thread-specific buffer. | ||
650 | */ | ||
651 | __flush_icache_range((unsigned long)state->buffer, | ||
652 | (unsigned long)buffer); | ||
653 | |||
654 | /* Indicate enabled */ | ||
655 | state->is_enabled = is_single_step; | ||
656 | regs->pc = (unsigned long)state->buffer; | ||
657 | |||
658 | /* Fault immediately if we are coming back from a syscall. */ | ||
659 | if (regs->faultnum == INT_SWINT_1) | ||
660 | regs->pc += 8; | ||
661 | } | ||
662 | |||
663 | #endif /* !__tilegx__ */ | ||
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c new file mode 100644 index 000000000000..1cb5ec79de04 --- /dev/null +++ b/arch/tile/kernel/smp.c | |||
@@ -0,0 +1,256 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * TILE SMP support routines. | ||
15 | */ | ||
16 | |||
17 | #include <linux/smp.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <asm/cacheflush.h> | ||
23 | |||
24 | HV_Topology smp_topology __write_once; | ||
25 | EXPORT_SYMBOL(smp_topology); | ||
26 | |||
27 | #if CHIP_HAS_IPI() | ||
28 | static unsigned long __iomem *ipi_mappings[NR_CPUS]; | ||
29 | #endif | ||
30 | |||
31 | |||
32 | /* | ||
33 | * Top-level send_IPI*() functions to send messages to other cpus. | ||
34 | */ | ||
35 | |||
36 | /* Set by smp_send_stop() to avoid recursive panics. */ | ||
37 | static int stopping_cpus; | ||
38 | |||
39 | void send_IPI_single(int cpu, int tag) | ||
40 | { | ||
41 | HV_Recipient recip = { | ||
42 | .y = cpu / smp_width, | ||
43 | .x = cpu % smp_width, | ||
44 | .state = HV_TO_BE_SENT | ||
45 | }; | ||
46 | int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag)); | ||
47 | BUG_ON(rc <= 0); | ||
48 | } | ||
49 | |||
50 | void send_IPI_many(const struct cpumask *mask, int tag) | ||
51 | { | ||
52 | HV_Recipient recip[NR_CPUS]; | ||
53 | int cpu, sent; | ||
54 | int nrecip = 0; | ||
55 | int my_cpu = smp_processor_id(); | ||
56 | for_each_cpu(cpu, mask) { | ||
57 | HV_Recipient *r; | ||
58 | BUG_ON(cpu == my_cpu); | ||
59 | r = &recip[nrecip++]; | ||
60 | r->y = cpu / smp_width; | ||
61 | r->x = cpu % smp_width; | ||
62 | r->state = HV_TO_BE_SENT; | ||
63 | } | ||
64 | sent = 0; | ||
65 | while (sent < nrecip) { | ||
66 | int rc = hv_send_message(recip, nrecip, | ||
67 | (HV_VirtAddr)&tag, sizeof(tag)); | ||
68 | if (rc <= 0) { | ||
69 | if (!stopping_cpus) /* avoid recursive panic */ | ||
70 | panic("hv_send_message returned %d", rc); | ||
71 | break; | ||
72 | } | ||
73 | sent += rc; | ||
74 | } | ||
75 | } | ||
76 | |||
77 | void send_IPI_allbutself(int tag) | ||
78 | { | ||
79 | struct cpumask mask; | ||
80 | cpumask_copy(&mask, cpu_online_mask); | ||
81 | cpumask_clear_cpu(smp_processor_id(), &mask); | ||
82 | send_IPI_many(&mask, tag); | ||
83 | } | ||
84 | |||
85 | |||
86 | /* | ||
87 | * Provide smp_call_function_mask, but also run function locally | ||
88 | * if specified in the mask. | ||
89 | */ | ||
90 | void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *), | ||
91 | void *info, bool wait) | ||
92 | { | ||
93 | int cpu = get_cpu(); | ||
94 | smp_call_function_many(mask, func, info, wait); | ||
95 | if (cpumask_test_cpu(cpu, mask)) { | ||
96 | local_irq_disable(); | ||
97 | func(info); | ||
98 | local_irq_enable(); | ||
99 | } | ||
100 | put_cpu(); | ||
101 | } | ||
102 | |||
103 | |||
104 | /* | ||
105 | * Functions related to starting/stopping cpus. | ||
106 | */ | ||
107 | |||
108 | /* Handler to start the current cpu. */ | ||
109 | static void smp_start_cpu_interrupt(void) | ||
110 | { | ||
111 | get_irq_regs()->pc = start_cpu_function_addr; | ||
112 | } | ||
113 | |||
114 | /* Handler to stop the current cpu. */ | ||
115 | static void smp_stop_cpu_interrupt(void) | ||
116 | { | ||
117 | set_cpu_online(smp_processor_id(), 0); | ||
118 | raw_local_irq_disable_all(); | ||
119 | for (;;) | ||
120 | asm("nap"); | ||
121 | } | ||
122 | |||
123 | /* This function calls the 'stop' function on all other CPUs in the system. */ | ||
124 | void smp_send_stop(void) | ||
125 | { | ||
126 | stopping_cpus = 1; | ||
127 | send_IPI_allbutself(MSG_TAG_STOP_CPU); | ||
128 | } | ||
129 | |||
130 | |||
131 | /* | ||
132 | * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages. | ||
133 | */ | ||
134 | void evaluate_message(int tag) | ||
135 | { | ||
136 | switch (tag) { | ||
137 | case MSG_TAG_START_CPU: /* Start up a cpu */ | ||
138 | smp_start_cpu_interrupt(); | ||
139 | break; | ||
140 | |||
141 | case MSG_TAG_STOP_CPU: /* Sent to shut down slave CPU's */ | ||
142 | smp_stop_cpu_interrupt(); | ||
143 | break; | ||
144 | |||
145 | case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */ | ||
146 | generic_smp_call_function_interrupt(); | ||
147 | break; | ||
148 | |||
149 | case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */ | ||
150 | generic_smp_call_function_single_interrupt(); | ||
151 | break; | ||
152 | |||
153 | default: | ||
154 | panic("Unknown IPI message tag %d", tag); | ||
155 | break; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | |||
160 | /* | ||
161 | * flush_icache_range() code uses smp_call_function(). | ||
162 | */ | ||
163 | |||
164 | struct ipi_flush { | ||
165 | unsigned long start; | ||
166 | unsigned long end; | ||
167 | }; | ||
168 | |||
169 | static void ipi_flush_icache_range(void *info) | ||
170 | { | ||
171 | struct ipi_flush *flush = (struct ipi_flush *) info; | ||
172 | __flush_icache_range(flush->start, flush->end); | ||
173 | } | ||
174 | |||
175 | void flush_icache_range(unsigned long start, unsigned long end) | ||
176 | { | ||
177 | struct ipi_flush flush = { start, end }; | ||
178 | preempt_disable(); | ||
179 | on_each_cpu(ipi_flush_icache_range, &flush, 1); | ||
180 | preempt_enable(); | ||
181 | } | ||
182 | |||
183 | |||
184 | /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ | ||
185 | static irqreturn_t handle_reschedule_ipi(int irq, void *token) | ||
186 | { | ||
187 | /* | ||
188 | * Nothing to do here; when we return from interrupt, the | ||
189 | * rescheduling will occur there. But do bump the interrupt | ||
190 | * profiler count in the meantime. | ||
191 | */ | ||
192 | __get_cpu_var(irq_stat).irq_resched_count++; | ||
193 | |||
194 | return IRQ_HANDLED; | ||
195 | } | ||
196 | |||
197 | static struct irqaction resched_action = { | ||
198 | .handler = handle_reschedule_ipi, | ||
199 | .name = "resched", | ||
200 | .dev_id = handle_reschedule_ipi /* unique token */, | ||
201 | }; | ||
202 | |||
203 | void __init ipi_init(void) | ||
204 | { | ||
205 | #if CHIP_HAS_IPI() | ||
206 | int cpu; | ||
207 | /* Map IPI trigger MMIO addresses. */ | ||
208 | for_each_possible_cpu(cpu) { | ||
209 | HV_Coord tile; | ||
210 | HV_PTE pte; | ||
211 | unsigned long offset; | ||
212 | |||
213 | tile.x = cpu_x(cpu); | ||
214 | tile.y = cpu_y(cpu); | ||
215 | if (hv_get_ipi_pte(tile, 1, &pte) != 0) | ||
216 | panic("Failed to initialize IPI for cpu %d\n", cpu); | ||
217 | |||
218 | offset = hv_pte_get_pfn(pte) << PAGE_SHIFT; | ||
219 | ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte); | ||
220 | } | ||
221 | #endif | ||
222 | |||
223 | /* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */ | ||
224 | tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU); | ||
225 | BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action)); | ||
226 | } | ||
227 | |||
228 | #if CHIP_HAS_IPI() | ||
229 | |||
230 | void smp_send_reschedule(int cpu) | ||
231 | { | ||
232 | WARN_ON(cpu_is_offline(cpu)); | ||
233 | |||
234 | /* | ||
235 | * We just want to do an MMIO store. The traditional writeq() | ||
236 | * functions aren't really correct here, since they're always | ||
237 | * directed at the PCI shim. For now, just do a raw store, | ||
238 | * casting away the __iomem attribute. | ||
239 | */ | ||
240 | ((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0; | ||
241 | } | ||
242 | |||
243 | #else | ||
244 | |||
245 | void smp_send_reschedule(int cpu) | ||
246 | { | ||
247 | HV_Coord coord; | ||
248 | |||
249 | WARN_ON(cpu_is_offline(cpu)); | ||
250 | |||
251 | coord.y = cpu_y(cpu); | ||
252 | coord.x = cpu_x(cpu); | ||
253 | hv_trigger_ipi(coord, IRQ_RESCHEDULE); | ||
254 | } | ||
255 | |||
256 | #endif /* CHIP_HAS_IPI() */ | ||
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c new file mode 100644 index 000000000000..74d62d098edf --- /dev/null +++ b/arch/tile/kernel/smpboot.c | |||
@@ -0,0 +1,278 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/kernel_stat.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/bootmem.h> | ||
23 | #include <linux/notifier.h> | ||
24 | #include <linux/cpu.h> | ||
25 | #include <linux/percpu.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/err.h> | ||
28 | #include <linux/irq.h> | ||
29 | #include <asm/mmu_context.h> | ||
30 | #include <asm/tlbflush.h> | ||
31 | #include <asm/sections.h> | ||
32 | |||
/* State of each CPU (CPU_ONLINE, CPU_UP_PREPARE, ...). */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };

/* The messaging code jumps to this pointer during boot-up */
unsigned long start_cpu_function_addr;

/*
 * Called very early during startup to mark the boot cpu as online and
 * present, and to enable hypervisor message delivery on it.
 */
void __init smp_prepare_boot_cpu(void)
{
	int cpu = smp_processor_id();
	set_cpu_online(cpu, 1);
	set_cpu_present(cpu, 1);
	__get_cpu_var(cpu_state) = CPU_ONLINE;

	/* Allow hypervisor messages to be received on the boot cpu. */
	init_messaging();
}
49 | |||
/* Entry point for secondary cpus; runs on each new cpu's idle stack. */
static void start_secondary(void);

/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	long rc;
	int cpu, cpu_count;
	int boot_cpu = smp_processor_id();

	current_thread_info()->cpu = boot_cpu;

	/*
	 * Pin this task to the boot CPU while we bring up the others,
	 * just to make sure we don't uselessly migrate as they come up.
	 */
	rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);

	/* Print information about disabled and dataplane cpus. */
	print_disabled_cpus();

	/*
	 * Tell the messaging subsystem how to respond to the
	 * startup message.  We use a level of indirection to avoid
	 * confusing the linker with the fact that the messaging
	 * subsystem is calling __init code.
	 */
	start_cpu_function_addr = (unsigned long) &online_secondary;

	/* Set up thread context for all new processors. */
	cpu_count = 1;	/* the boot cpu is already up */
	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		struct task_struct *idle;

		if (cpu == boot_cpu)
			continue;

		if (!cpu_possible(cpu)) {
			/*
			 * Make this processor do nothing on boot.
			 * Note that we don't give the boot_pc function
			 * a stack, so it has to be assembly code.
			 */
			per_cpu(boot_sp, cpu) = 0;
			per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
			continue;
		}

		/* Create a new idle thread to run start_secondary() */
		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);
		idle->thread.pc = (unsigned long) start_secondary;

		/* Make this thread the boot thread for this processor */
		per_cpu(boot_sp, cpu) = task_ksp0(idle);
		per_cpu(boot_pc, cpu) = idle->thread.pc;

		++cpu_count;
	}
	BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

	/* Fire up the other tiles, if any */
	init_cpu_present(cpu_possible_mask);
	if (cpumask_weight(cpu_present_mask) > 1) {
		mb();	/* make sure all data is visible to new processors */
		hv_start_all_tiles();
	}
}
124 | |||
/* Snapshot of cpu_online_mask taken in smp_cpus_done(). */
static __initdata struct cpumask init_affinity;

/*
 * Late initcall: undo the single-cpu pinning done in smp_cpus_done()
 * by restoring init's affinity to the full boot-time online set.
 */
static __init int reset_init_affinity(void)
{
	long rc = sched_setaffinity(current->pid, &init_affinity);
	if (rc != 0)
		pr_warning("couldn't reset init affinity (%ld)\n",
			   rc);
	return 0;
}
late_initcall(reset_init_affinity);
136 | |||
/* Cpus that have reached start_secondary(); guards against double-starts. */
static struct cpumask cpu_started __cpuinitdata;

/*
 * Activate a secondary processor.  Very minimal; don't add anything
 * to this path without knowing what you're doing, since SMP booting
 * is pretty fragile.
 */
static void __cpuinit start_secondary(void)
{
	int cpuid = smp_processor_id();

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[cpuid]);

	preempt_disable();

	/*
	 * In large machines even this will slow us down, since we
	 * will be contending for the printk spinlock.
	 */
	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */

	/* Initialize the current asid for our first page table. */
	__get_cpu_var(current_asid) = min_asid;

	/* Set up this thread as another owner of the init_mm */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	/* Allow hypervisor messages to be received */
	init_messaging();
	local_irq_enable();

	/* Indicate that we're ready to come up. */
	/* Must not do this before we're ready to receive messages */
	if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
		pr_warning("CPU#%d already started!\n", cpuid);
		/* Park here forever; this cpu's state is suspect. */
		for (;;)
			local_irq_enable();
	}

	/* Sleep until __cpu_up() sends us MSG_TAG_START_CPU. */
	smp_nap();
}
183 | |||
/*
 * Bring a secondary processor online.  Reached via the hypervisor
 * start-cpu message (start_cpu_function_addr points here during boot).
 */
void __cpuinit online_secondary(void)
{
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	BUG_ON(in_interrupt());

	/* This must be done before setting cpu_online_mask */
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI.  Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), 1);
	ipi_call_unlock();
	__get_cpu_var(cpu_state) = CPU_ONLINE;

	/* Set up tile-specific state for this cpu. */
	setup_cpu(0);

	/* Set up tile-timer clock-event device on this cpu */
	setup_tile_timer();

	/* Balances the preempt_disable() in start_secondary(). */
	preempt_enable();

	/* Enter the idle loop; this call does not return. */
	cpu_idle();
}
223 | |||
/*
 * Boot-cpu side of bringing secondary "cpu" online: wait for it to
 * reach start_secondary(), send it the start message, then spin until
 * online_secondary() marks it online.  Returns 0 or -EIO on timeout.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	/* Wait 5s total for all CPUs for them to come online */
	static int timeout;	/* intentionally shared across calls: the
				 * budget is 5s total, not 5s per cpu */
	for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
		if (timeout >= 50000) {
			pr_info("skipping unresponsive cpu%d\n", cpu);
			local_irq_enable();
			return -EIO;
		}
		udelay(100);	/* 50000 iterations * 100us == 5s */
	}

	local_irq_enable();
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Unleash the CPU! */
	send_IPI_single(cpu, MSG_TAG_START_CPU);
	while (!cpumask_test_cpu(cpu, cpu_online_mask))
		cpu_relax();
	return 0;
}
246 | |||
/*
 * Replacement handler for MSG_TAG_START_CPU once boot has finished:
 * receiving a start message at that point can only be a bug.
 */
static void panic_start_cpu(void)
{
	panic("Received a MSG_START_CPU IPI after boot finished.");
}
251 | |||
/* Final SMP bring-up step, called after all cpus are online. */
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu, next, rc;

	/* Reset the response to a (now illegal) MSG_START_CPU IPI. */
	start_cpu_function_addr = (unsigned long) &panic_start_cpu;

	/* Remember the full online set for reset_init_affinity(). */
	cpumask_copy(&init_affinity, cpu_online_mask);

	/*
	 * Pin ourselves to a single cpu in the initial affinity set
	 * so that kernel mappings for the rootfs are not in the dataplane,
	 * if set, and to avoid unnecessary migrating during bringup.
	 * Use the last cpu just in case the whole chip has been
	 * isolated from the scheduler, to keep init away from likely
	 * more useful user code.  This also ensures that work scheduled
	 * via schedule_delayed_work() in the init routines will land
	 * on this cpu.
	 */
	/* Empty-bodied loop: leaves "cpu" as the last cpu in the mask. */
	for (cpu = cpumask_first(&init_affinity);
	     (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids;
	     cpu = next)
		;
	rc = sched_setaffinity(current->pid, cpumask_of(cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
}
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c new file mode 100644 index 000000000000..b6268d3ae869 --- /dev/null +++ b/arch/tile/kernel/stack.c | |||
@@ -0,0 +1,486 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/kprobes.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/pfn.h> | ||
20 | #include <linux/kallsyms.h> | ||
21 | #include <linux/stacktrace.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/mmzone.h> | ||
24 | #include <asm/backtrace.h> | ||
25 | #include <asm/page.h> | ||
26 | #include <asm/tlbflush.h> | ||
27 | #include <asm/ucontext.h> | ||
28 | #include <asm/sigframe.h> | ||
29 | #include <asm/stack.h> | ||
30 | #include <arch/abi.h> | ||
31 | #include <arch/interrupts.h> | ||
32 | |||
33 | |||
34 | /* Is address on the specified kernel stack? */ | ||
35 | static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp) | ||
36 | { | ||
37 | ulong kstack_base = (ulong) kbt->task->stack; | ||
38 | if (kstack_base == 0) /* corrupt task pointer; just follow stack... */ | ||
39 | return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory; | ||
40 | return sp >= kstack_base && sp < kstack_base + THREAD_SIZE; | ||
41 | } | ||
42 | |||
43 | /* Is address in the specified kernel code? */ | ||
44 | static int in_kernel_text(VirtualAddress address) | ||
45 | { | ||
46 | return (address >= MEM_SV_INTRPT && | ||
47 | address < MEM_SV_INTRPT + HPAGE_SIZE); | ||
48 | } | ||
49 | |||
/*
 * Is address valid for reading?  Walks the two-level hypervisor page
 * table by hand (kbt->pgtable is the L1 table captured at init time).
 */
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	if (l1_pgtable == NULL)
		return 0;	/* can't read user space in other tasks */

	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		/* A huge L1 entry is the final mapping: check it directly. */
		if (!pfn_valid(pfn)) {
			pr_err("huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	/* Otherwise pfn names the L2 table, which we must be able to
	 * address directly (i.e. it must not live in highmem). */
	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		pr_err("L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
84 | |||
/*
 * Callback for backtracer; basically a glorified memcpy.  Returns true
 * on success.  Validates the address first so a bogus frame pointer
 * can't make the backtracer fault on an unmapped address.
 */
static bool read_memory_func(void *result, VirtualAddress address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
	if (in_kernel_text(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!valid_address(kbt, address)) {
		return 0;	/* invalid user-space address */
	}
	/* Atomic copy: we may be in interrupt context and must not sleep. */
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}
107 | |||
108 | /* Return a pt_regs pointer for a valid fault handler frame */ | ||
109 | static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) | ||
110 | { | ||
111 | #ifndef __tilegx__ | ||
112 | const char *fault = NULL; /* happy compiler */ | ||
113 | char fault_buf[64]; | ||
114 | VirtualAddress sp = kbt->it.sp; | ||
115 | struct pt_regs *p; | ||
116 | |||
117 | if (!in_kernel_stack(kbt, sp)) | ||
118 | return NULL; | ||
119 | if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) | ||
120 | return NULL; | ||
121 | p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE); | ||
122 | if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN) | ||
123 | fault = "syscall"; | ||
124 | else { | ||
125 | if (kbt->verbose) { /* else we aren't going to use it */ | ||
126 | snprintf(fault_buf, sizeof(fault_buf), | ||
127 | "interrupt %ld", p->faultnum); | ||
128 | fault = fault_buf; | ||
129 | } | ||
130 | } | ||
131 | if (EX1_PL(p->ex1) == KERNEL_PL && | ||
132 | in_kernel_text(p->pc) && | ||
133 | in_kernel_stack(kbt, p->sp) && | ||
134 | p->sp >= sp) { | ||
135 | if (kbt->verbose) | ||
136 | pr_err(" <%s while in kernel mode>\n", fault); | ||
137 | } else if (EX1_PL(p->ex1) == USER_PL && | ||
138 | p->pc < PAGE_OFFSET && | ||
139 | p->sp < PAGE_OFFSET) { | ||
140 | if (kbt->verbose) | ||
141 | pr_err(" <%s while in user mode>\n", fault); | ||
142 | } else if (kbt->verbose) { | ||
143 | pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n", | ||
144 | p->pc, p->sp, p->ex1); | ||
145 | p = NULL; | ||
146 | } | ||
147 | if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) | ||
148 | return p; | ||
149 | #endif | ||
150 | return NULL; | ||
151 | } | ||
152 | |||
153 | /* Is the pc pointing to a sigreturn trampoline? */ | ||
154 | static int is_sigreturn(VirtualAddress pc) | ||
155 | { | ||
156 | return (pc == VDSO_BASE); | ||
157 | } | ||
158 | |||
159 | /* Return a pt_regs pointer for a valid signal handler frame */ | ||
160 | static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt) | ||
161 | { | ||
162 | BacktraceIterator *b = &kbt->it; | ||
163 | |||
164 | if (b->pc == VDSO_BASE) { | ||
165 | struct rt_sigframe *frame; | ||
166 | unsigned long sigframe_top = | ||
167 | b->sp + sizeof(struct rt_sigframe) - 1; | ||
168 | if (!valid_address(kbt, b->sp) || | ||
169 | !valid_address(kbt, sigframe_top)) { | ||
170 | if (kbt->verbose) | ||
171 | pr_err(" (odd signal: sp %#lx?)\n", | ||
172 | (unsigned long)(b->sp)); | ||
173 | return NULL; | ||
174 | } | ||
175 | frame = (struct rt_sigframe *)b->sp; | ||
176 | if (kbt->verbose) { | ||
177 | pr_err(" <received signal %d>\n", | ||
178 | frame->info.si_signo); | ||
179 | } | ||
180 | return &frame->uc.uc_mcontext.regs; | ||
181 | } | ||
182 | return NULL; | ||
183 | } | ||
184 | |||
/* Does the iterator's current frame point at a sigreturn trampoline? */
static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
	return is_sigreturn(kbt->it.pc);
}

/*
 * Try to continue the backtrace across a context boundary: if the
 * current frame is a fault-handler or signal frame, re-seed the
 * underlying backtracer from its saved registers and return 1;
 * otherwise return 0 (the backtrace ends here).
 */
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt);
	if (p == NULL)
		return 0;
	/* regs[52] is presumably the ABI frame pointer — TODO confirm. */
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}
204 | |||
/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
		struct KBacktraceIterator *kbt)
{
	for (;;) {
		/* Scan the rest of the current context for a real frame. */
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return 1;
		} while (backtrace_next(&kbt->it));

		/* Context exhausted: cross into the next one, or give up. */
		if (!KBacktraceIterator_restart(kbt))
			return 0;
	}
}
219 | |||
220 | /* | ||
221 | * If the current sp is on a page different than what we recorded | ||
222 | * as the top-of-kernel-stack last time we context switched, we have | ||
223 | * probably blown the stack, and nothing is going to work out well. | ||
224 | * If we can at least get out a warning, that may help the debug, | ||
225 | * though we probably won't be able to backtrace into the code that | ||
226 | * actually did the recursive damage. | ||
227 | */ | ||
228 | static void validate_stack(struct pt_regs *regs) | ||
229 | { | ||
230 | int cpu = smp_processor_id(); | ||
231 | unsigned long ksp0 = get_current_ksp0(); | ||
232 | unsigned long ksp0_base = ksp0 - THREAD_SIZE; | ||
233 | unsigned long sp = stack_pointer; | ||
234 | |||
235 | if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) { | ||
236 | pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n" | ||
237 | " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", | ||
238 | cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); | ||
239 | } | ||
240 | |||
241 | else if (sp < ksp0_base + sizeof(struct thread_info)) { | ||
242 | pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n" | ||
243 | " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", | ||
244 | cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); | ||
245 | } | ||
246 | } | ||
247 | |||
/*
 * Initialize a backtrace iterator for task "t" (NULL means "current")
 * starting from "regs" (NULL means "wherever the task last slept").
 */
void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	VirtualAddress pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range, and if we're
	 * asking about the current process we grab the page table
	 * so we can check user accesses before trying to read them.
	 * We flush the TLB to avoid any weird skew issues.
	 */
	is_current = (t == NULL);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->pgtable = NULL;
	kbt->verbose = 0;	/* override in caller if desired */
	kbt->profile = 0;	/* override in caller if desired */
	kbt->end = 0;
	kbt->new_context = 0;
	if (is_current) {
		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
			/*
			 * Not just an optimization: this also allows
			 * this to work at all before va/pa mappings
			 * are set up.
			 */
			kbt->pgtable = swapper_pg_dir;
		} else {
			/* kbt->pgtable stays NULL if the pgdir is in highmem,
			 * disabling user-space reads (see valid_address()). */
			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
			if (!PageHighMem(page))
				kbt->pgtable = __va(pgdir_pa);
			else
				pr_err("page table not in LOWMEM"
				       " (%#llx)\n", pgdir_pa);
		}
		local_flush_tlb_all();
		validate_stack(regs);
	}

	if (regs == NULL) {
		/* No registers given: reconstruct from the switch point. */
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = 1;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	/* Skip any leading sigreturn frames; note if there was nothing. */
	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);
313 | |||
/* Has the iterator run off the end of the backtrace? */
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

/* Advance to the next real frame, crossing contexts if necessary. */
void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) &&
	    !KBacktraceIterator_restart(kbt)) {
		kbt->end = 1;
		return;
	}

	/* Skip past any sigreturn trampolines. */
	kbt->end = !KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_next);
332 | |||
/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;

	if (headers) {
		/*
		 * Add a blank line since if we are called from panic(),
		 * then bust_spinlocks() spit out a space in front of us
		 * and it will mess up our KERN_ERR.
		 */
		pr_err("\n");
		pr_err("Starting stack dump of tid %d, pid %d (%s)"
		       " on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       smp_processor_id(), get_cycles());
	}
#ifdef __tilegx__
	/* Ask the simulator (if any) to dump its own view of the trace. */
	if (kbt->is_current) {
		__insn_mtspr(SPR_SIM_CONTROL,
			     SIM_DUMP_SPR_ARG(SIM_DUMP_BACKTRACE));
	}
#endif
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char *modname;
		const char *name;
		unsigned long address = kbt->it.pc;
		unsigned long offset, size;
		char namebuf[KSYM_NAME_LEN+100];

		/* Only kernel pcs can be symbolized. */
		if (address >= PAGE_OFFSET)
			name = kallsyms_lookup(address, &size, &offset,
					       &modname, namebuf);
		else
			name = NULL;

		if (!name)
			namebuf[0] = '\0';
		else {
			/* Append "+offset/size" and the module, if any. */
			size_t namelen = strlen(namebuf);
			size_t remaining = (sizeof(namebuf) - 1) - namelen;
			char *p = namebuf + namelen;
			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
					  offset, size);
			if (modname && rc < remaining)
				snprintf(p + rc, remaining - rc,
					 "[%s] ", modname);
			namebuf[sizeof(namebuf)-1] = '\0';
		}

		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		/* Cap runaway (possibly corrupt) backtraces at 100 frames. */
		if (i >= 100) {
			pr_err("Stack dump truncated"
			       " (%d frames)\n", i);
			break;
		}
	}
	if (headers)
		pr_err("Stack dump complete\n");
}
EXPORT_SYMBOL(tile_show_stack);
402 | |||
403 | |||
404 | /* This is called from show_regs() and _dump_stack() */ | ||
405 | void dump_stack_regs(struct pt_regs *regs) | ||
406 | { | ||
407 | struct KBacktraceIterator kbt; | ||
408 | KBacktraceIterator_init(&kbt, NULL, regs); | ||
409 | tile_show_stack(&kbt, 1); | ||
410 | } | ||
411 | EXPORT_SYMBOL(dump_stack_regs); | ||
412 | |||
413 | static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs, | ||
414 | ulong pc, ulong lr, ulong sp, ulong r52) | ||
415 | { | ||
416 | memset(regs, 0, sizeof(struct pt_regs)); | ||
417 | regs->pc = pc; | ||
418 | regs->lr = lr; | ||
419 | regs->sp = sp; | ||
420 | regs->regs[52] = r52; | ||
421 | return regs; | ||
422 | } | ||
423 | |||
424 | /* This is called from dump_stack() and just converts to pt_regs */ | ||
425 | void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52) | ||
426 | { | ||
427 | struct pt_regs regs; | ||
428 | dump_stack_regs(regs_to_pt_regs(®s, pc, lr, sp, r52)); | ||
429 | } | ||
430 | |||
431 | /* This is called from KBacktraceIterator_init_current() */ | ||
432 | void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc, | ||
433 | ulong lr, ulong sp, ulong r52) | ||
434 | { | ||
435 | struct pt_regs regs; | ||
436 | KBacktraceIterator_init(kbt, NULL, | ||
437 | regs_to_pt_regs(®s, pc, lr, sp, r52)); | ||
438 | } | ||
439 | |||
440 | /* This is called only from kernel/sched.c, with esp == NULL */ | ||
441 | void show_stack(struct task_struct *task, unsigned long *esp) | ||
442 | { | ||
443 | struct KBacktraceIterator kbt; | ||
444 | if (task == NULL || task == current) | ||
445 | KBacktraceIterator_init_current(&kbt); | ||
446 | else | ||
447 | KBacktraceIterator_init(&kbt, task, NULL); | ||
448 | tile_show_stack(&kbt, 0); | ||
449 | } | ||
450 | |||
#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

/*
 * Fill "trace" with the kernel-space pcs of "task"'s stack (NULL or
 * current means the calling task), honoring trace->skip and
 * trace->max_entries.  Collection stops at the first user-space pc.
 */
void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		/* Stop when the buffer is full or we leave kernel space. */
		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

/* Capture the calling task's own stack. */
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}

#endif
484 | |||
485 | /* In entry.S */ | ||
486 | EXPORT_SYMBOL(KBacktraceIterator_init_current); | ||
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c new file mode 100644 index 000000000000..f0f87eab8c39 --- /dev/null +++ b/arch/tile/kernel/sys.c | |||
@@ -0,0 +1,120 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This file contains various random system calls that | ||
15 | * have a non-standard calling sequence on the Linux/TILE | ||
16 | * platform. | ||
17 | */ | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/smp.h> | ||
23 | #include <linux/smp_lock.h> | ||
24 | #include <linux/syscalls.h> | ||
25 | #include <linux/mman.h> | ||
26 | #include <linux/file.h> | ||
27 | #include <linux/mempolicy.h> | ||
28 | #include <linux/binfmts.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/compat.h> | ||
31 | #include <linux/uaccess.h> | ||
32 | #include <linux/signal.h> | ||
33 | #include <asm/syscalls.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/homecache.h> | ||
36 | #include <arch/chip.h> | ||
37 | |||
/*
 * flush_cache() syscall: evict this cpu's cache via the homecache
 * layer (presumably flushing lines homed on this cpu — see
 * homecache_evict()).  Always succeeds.
 */
SYSCALL_DEFINE0(flush_cache)
{
	homecache_evict(cpumask_of(smp_processor_id()));
	return 0;
}
43 | |||
44 | /* | ||
45 | * Syscalls that pass 64-bit values on 32-bit systems normally | ||
46 | * pass them as (low,high) word packed into the immediately adjacent | ||
47 | * registers. If the low word naturally falls on an even register, | ||
48 | * our ABI makes it work correctly; if not, we adjust it here. | ||
49 | * Handling it here means we don't have to fix uclibc AND glibc AND | ||
50 | * any other standard libcs we want to support. | ||
51 | */ | ||
52 | |||
53 | #if !defined(__tilegx__) || defined(CONFIG_COMPAT) | ||
54 | |||
55 | ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count) | ||
56 | { | ||
57 | return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count); | ||
58 | } | ||
59 | |||
60 | long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi, | ||
61 | u32 len, int advice) | ||
62 | { | ||
63 | return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo, | ||
64 | len, advice); | ||
65 | } | ||
66 | |||
67 | int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, | ||
68 | u32 len_lo, u32 len_hi, int advice) | ||
69 | { | ||
70 | return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo, | ||
71 | ((loff_t)len_hi << 32) | len_lo, advice); | ||
72 | } | ||
73 | |||
74 | #endif /* 32-bit syscall wrappers */ | ||
75 | |||
/* Note: used by the compat code even in 64-bit Linux. */
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off_4k)
{
	/* off_4k is in 4KB units; convert it to units of PAGE_SIZE,
	 * rejecting offsets not aligned to the real page size. */
#define PAGE_ADJUST (PAGE_SHIFT - 12)
	if (off_4k & ((1 << PAGE_ADJUST) - 1))
		return -EINVAL;
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      off_4k >> PAGE_ADJUST);
}

#ifdef __tilegx__
/* 64-bit mmap: the offset is in bytes and must be page-aligned. */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, off_t, offset)
{
	if (offset & ((1 << PAGE_SHIFT) - 1))
		return -EINVAL;
	return sys_mmap_pgoff(addr, len, prot, flags, fd,
			      offset >> PAGE_SHIFT);
}
#endif
99 | |||
100 | |||
/* Provide the actual syscall number to call mapping. */
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),

#ifndef __tilegx__
/* See comments at the top of the file. */
#define sys_fadvise64 sys32_fadvise64
#define sys_fadvise64_64 sys32_fadvise64_64
#define sys_readahead sys32_readahead
#define sys_sync_file_range sys_sync_file_range2
#endif

/*
 * Note that we can't include <linux/unistd.h> here since the header
 * guard will defeat us; <asm/unistd.h> checks for __SYSCALL as well.
 */
/* Default every slot to sys_ni_syscall; the designated initializers
 * generated by the __SYSCALL expansion then overwrite the real ones. */
void *sys_call_table[__NR_syscalls] = {
	[0 ... __NR_syscalls-1] = sys_ni_syscall,
#include <asm/unistd.h>
};
diff --git a/arch/tile/kernel/tile-desc_32.c b/arch/tile/kernel/tile-desc_32.c new file mode 100644 index 000000000000..69af0e150f78 --- /dev/null +++ b/arch/tile/kernel/tile-desc_32.c | |||
@@ -0,0 +1,2498 @@ | |||
/* This define is BFD_RELOC_##x for real bfd, or -1 for everyone else. */
#define BFD_RELOC(x) -1

/* Special registers. */
#define TREG_LR 55	/* link register; implicit dest of jal/jalr etc. */
#define TREG_SN 56	/* written by the ".sn" opcode variants below */
#define TREG_ZERO 63	/* zero register */

/* FIXME: Rename this. */
#include <asm/opcode-tile.h>

#include <linux/stddef.h>
14 | const struct tile_opcode tile_opcodes[395] = | ||
15 | { | ||
16 | { "bpt", TILE_OPC_BPT, 0x2, 0, TREG_ZERO, 0, | ||
17 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
18 | }, | ||
19 | { "info", TILE_OPC_INFO, 0xf, 1, TREG_ZERO, 1, | ||
20 | { { 0 }, { 1 }, { 2 }, { 3 }, { 0, } }, | ||
21 | }, | ||
22 | { "infol", TILE_OPC_INFOL, 0x3, 1, TREG_ZERO, 1, | ||
23 | { { 4 }, { 5 }, { 0, }, { 0, }, { 0, } }, | ||
24 | }, | ||
25 | { "j", TILE_OPC_J, 0x2, 1, TREG_ZERO, 1, | ||
26 | { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } }, | ||
27 | }, | ||
28 | { "jal", TILE_OPC_JAL, 0x2, 1, TREG_LR, 1, | ||
29 | { { 0, }, { 6 }, { 0, }, { 0, }, { 0, } }, | ||
30 | }, | ||
31 | { "move", TILE_OPC_MOVE, 0xf, 2, TREG_ZERO, 1, | ||
32 | { { 7, 8 }, { 9, 10 }, { 11, 12 }, { 13, 14 }, { 0, } }, | ||
33 | }, | ||
34 | { "move.sn", TILE_OPC_MOVE_SN, 0x3, 2, TREG_SN, 1, | ||
35 | { { 7, 8 }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
36 | }, | ||
37 | { "movei", TILE_OPC_MOVEI, 0xf, 2, TREG_ZERO, 1, | ||
38 | { { 7, 0 }, { 9, 1 }, { 11, 2 }, { 13, 3 }, { 0, } }, | ||
39 | }, | ||
40 | { "movei.sn", TILE_OPC_MOVEI_SN, 0x3, 2, TREG_SN, 1, | ||
41 | { { 7, 0 }, { 9, 1 }, { 0, }, { 0, }, { 0, } }, | ||
42 | }, | ||
43 | { "moveli", TILE_OPC_MOVELI, 0x3, 2, TREG_ZERO, 1, | ||
44 | { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, | ||
45 | }, | ||
46 | { "moveli.sn", TILE_OPC_MOVELI_SN, 0x3, 2, TREG_SN, 1, | ||
47 | { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, | ||
48 | }, | ||
49 | { "movelis", TILE_OPC_MOVELIS, 0x3, 2, TREG_SN, 1, | ||
50 | { { 7, 4 }, { 9, 5 }, { 0, }, { 0, }, { 0, } }, | ||
51 | }, | ||
52 | { "prefetch", TILE_OPC_PREFETCH, 0x12, 1, TREG_ZERO, 1, | ||
53 | { { 0, }, { 10 }, { 0, }, { 0, }, { 15 } }, | ||
54 | }, | ||
55 | { "raise", TILE_OPC_RAISE, 0x2, 0, TREG_ZERO, 1, | ||
56 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
57 | }, | ||
58 | { "add", TILE_OPC_ADD, 0xf, 3, TREG_ZERO, 1, | ||
59 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
60 | }, | ||
61 | { "add.sn", TILE_OPC_ADD_SN, 0x3, 3, TREG_SN, 1, | ||
62 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
63 | }, | ||
64 | { "addb", TILE_OPC_ADDB, 0x3, 3, TREG_ZERO, 1, | ||
65 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
66 | }, | ||
67 | { "addb.sn", TILE_OPC_ADDB_SN, 0x3, 3, TREG_SN, 1, | ||
68 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
69 | }, | ||
70 | { "addbs_u", TILE_OPC_ADDBS_U, 0x3, 3, TREG_ZERO, 1, | ||
71 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
72 | }, | ||
73 | { "addbs_u.sn", TILE_OPC_ADDBS_U_SN, 0x3, 3, TREG_SN, 1, | ||
74 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
75 | }, | ||
76 | { "addh", TILE_OPC_ADDH, 0x3, 3, TREG_ZERO, 1, | ||
77 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
78 | }, | ||
79 | { "addh.sn", TILE_OPC_ADDH_SN, 0x3, 3, TREG_SN, 1, | ||
80 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
81 | }, | ||
82 | { "addhs", TILE_OPC_ADDHS, 0x3, 3, TREG_ZERO, 1, | ||
83 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
84 | }, | ||
85 | { "addhs.sn", TILE_OPC_ADDHS_SN, 0x3, 3, TREG_SN, 1, | ||
86 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
87 | }, | ||
88 | { "addi", TILE_OPC_ADDI, 0xf, 3, TREG_ZERO, 1, | ||
89 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
90 | }, | ||
91 | { "addi.sn", TILE_OPC_ADDI_SN, 0x3, 3, TREG_SN, 1, | ||
92 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
93 | }, | ||
94 | { "addib", TILE_OPC_ADDIB, 0x3, 3, TREG_ZERO, 1, | ||
95 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
96 | }, | ||
97 | { "addib.sn", TILE_OPC_ADDIB_SN, 0x3, 3, TREG_SN, 1, | ||
98 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
99 | }, | ||
100 | { "addih", TILE_OPC_ADDIH, 0x3, 3, TREG_ZERO, 1, | ||
101 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
102 | }, | ||
103 | { "addih.sn", TILE_OPC_ADDIH_SN, 0x3, 3, TREG_SN, 1, | ||
104 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
105 | }, | ||
106 | { "addli", TILE_OPC_ADDLI, 0x3, 3, TREG_ZERO, 1, | ||
107 | { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, | ||
108 | }, | ||
109 | { "addli.sn", TILE_OPC_ADDLI_SN, 0x3, 3, TREG_SN, 1, | ||
110 | { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, | ||
111 | }, | ||
112 | { "addlis", TILE_OPC_ADDLIS, 0x3, 3, TREG_SN, 1, | ||
113 | { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, | ||
114 | }, | ||
115 | { "adds", TILE_OPC_ADDS, 0x3, 3, TREG_ZERO, 1, | ||
116 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
117 | }, | ||
118 | { "adds.sn", TILE_OPC_ADDS_SN, 0x3, 3, TREG_SN, 1, | ||
119 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
120 | }, | ||
121 | { "adiffb_u", TILE_OPC_ADIFFB_U, 0x1, 3, TREG_ZERO, 1, | ||
122 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
123 | }, | ||
124 | { "adiffb_u.sn", TILE_OPC_ADIFFB_U_SN, 0x1, 3, TREG_SN, 1, | ||
125 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
126 | }, | ||
127 | { "adiffh", TILE_OPC_ADIFFH, 0x1, 3, TREG_ZERO, 1, | ||
128 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
129 | }, | ||
130 | { "adiffh.sn", TILE_OPC_ADIFFH_SN, 0x1, 3, TREG_SN, 1, | ||
131 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
132 | }, | ||
133 | { "and", TILE_OPC_AND, 0xf, 3, TREG_ZERO, 1, | ||
134 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
135 | }, | ||
136 | { "and.sn", TILE_OPC_AND_SN, 0x3, 3, TREG_SN, 1, | ||
137 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
138 | }, | ||
139 | { "andi", TILE_OPC_ANDI, 0xf, 3, TREG_ZERO, 1, | ||
140 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
141 | }, | ||
142 | { "andi.sn", TILE_OPC_ANDI_SN, 0x3, 3, TREG_SN, 1, | ||
143 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
144 | }, | ||
145 | { "auli", TILE_OPC_AULI, 0x3, 3, TREG_ZERO, 1, | ||
146 | { { 7, 8, 4 }, { 9, 10, 5 }, { 0, }, { 0, }, { 0, } }, | ||
147 | }, | ||
148 | { "avgb_u", TILE_OPC_AVGB_U, 0x1, 3, TREG_ZERO, 1, | ||
149 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
150 | }, | ||
151 | { "avgb_u.sn", TILE_OPC_AVGB_U_SN, 0x1, 3, TREG_SN, 1, | ||
152 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
153 | }, | ||
154 | { "avgh", TILE_OPC_AVGH, 0x1, 3, TREG_ZERO, 1, | ||
155 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
156 | }, | ||
157 | { "avgh.sn", TILE_OPC_AVGH_SN, 0x1, 3, TREG_SN, 1, | ||
158 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
159 | }, | ||
160 | { "bbns", TILE_OPC_BBNS, 0x2, 2, TREG_ZERO, 1, | ||
161 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
162 | }, | ||
163 | { "bbns.sn", TILE_OPC_BBNS_SN, 0x2, 2, TREG_SN, 1, | ||
164 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
165 | }, | ||
166 | { "bbnst", TILE_OPC_BBNST, 0x2, 2, TREG_ZERO, 1, | ||
167 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
168 | }, | ||
169 | { "bbnst.sn", TILE_OPC_BBNST_SN, 0x2, 2, TREG_SN, 1, | ||
170 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
171 | }, | ||
172 | { "bbs", TILE_OPC_BBS, 0x2, 2, TREG_ZERO, 1, | ||
173 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
174 | }, | ||
175 | { "bbs.sn", TILE_OPC_BBS_SN, 0x2, 2, TREG_SN, 1, | ||
176 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
177 | }, | ||
178 | { "bbst", TILE_OPC_BBST, 0x2, 2, TREG_ZERO, 1, | ||
179 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
180 | }, | ||
181 | { "bbst.sn", TILE_OPC_BBST_SN, 0x2, 2, TREG_SN, 1, | ||
182 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
183 | }, | ||
184 | { "bgez", TILE_OPC_BGEZ, 0x2, 2, TREG_ZERO, 1, | ||
185 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
186 | }, | ||
187 | { "bgez.sn", TILE_OPC_BGEZ_SN, 0x2, 2, TREG_SN, 1, | ||
188 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
189 | }, | ||
190 | { "bgezt", TILE_OPC_BGEZT, 0x2, 2, TREG_ZERO, 1, | ||
191 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
192 | }, | ||
193 | { "bgezt.sn", TILE_OPC_BGEZT_SN, 0x2, 2, TREG_SN, 1, | ||
194 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
195 | }, | ||
196 | { "bgz", TILE_OPC_BGZ, 0x2, 2, TREG_ZERO, 1, | ||
197 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
198 | }, | ||
199 | { "bgz.sn", TILE_OPC_BGZ_SN, 0x2, 2, TREG_SN, 1, | ||
200 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
201 | }, | ||
202 | { "bgzt", TILE_OPC_BGZT, 0x2, 2, TREG_ZERO, 1, | ||
203 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
204 | }, | ||
205 | { "bgzt.sn", TILE_OPC_BGZT_SN, 0x2, 2, TREG_SN, 1, | ||
206 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
207 | }, | ||
208 | { "bitx", TILE_OPC_BITX, 0x5, 2, TREG_ZERO, 1, | ||
209 | { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, | ||
210 | }, | ||
211 | { "bitx.sn", TILE_OPC_BITX_SN, 0x1, 2, TREG_SN, 1, | ||
212 | { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
213 | }, | ||
214 | { "blez", TILE_OPC_BLEZ, 0x2, 2, TREG_ZERO, 1, | ||
215 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
216 | }, | ||
217 | { "blez.sn", TILE_OPC_BLEZ_SN, 0x2, 2, TREG_SN, 1, | ||
218 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
219 | }, | ||
220 | { "blezt", TILE_OPC_BLEZT, 0x2, 2, TREG_ZERO, 1, | ||
221 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
222 | }, | ||
223 | { "blezt.sn", TILE_OPC_BLEZT_SN, 0x2, 2, TREG_SN, 1, | ||
224 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
225 | }, | ||
226 | { "blz", TILE_OPC_BLZ, 0x2, 2, TREG_ZERO, 1, | ||
227 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
228 | }, | ||
229 | { "blz.sn", TILE_OPC_BLZ_SN, 0x2, 2, TREG_SN, 1, | ||
230 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
231 | }, | ||
232 | { "blzt", TILE_OPC_BLZT, 0x2, 2, TREG_ZERO, 1, | ||
233 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
234 | }, | ||
235 | { "blzt.sn", TILE_OPC_BLZT_SN, 0x2, 2, TREG_SN, 1, | ||
236 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
237 | }, | ||
238 | { "bnz", TILE_OPC_BNZ, 0x2, 2, TREG_ZERO, 1, | ||
239 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
240 | }, | ||
241 | { "bnz.sn", TILE_OPC_BNZ_SN, 0x2, 2, TREG_SN, 1, | ||
242 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
243 | }, | ||
244 | { "bnzt", TILE_OPC_BNZT, 0x2, 2, TREG_ZERO, 1, | ||
245 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
246 | }, | ||
247 | { "bnzt.sn", TILE_OPC_BNZT_SN, 0x2, 2, TREG_SN, 1, | ||
248 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
249 | }, | ||
250 | { "bytex", TILE_OPC_BYTEX, 0x5, 2, TREG_ZERO, 1, | ||
251 | { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, | ||
252 | }, | ||
253 | { "bytex.sn", TILE_OPC_BYTEX_SN, 0x1, 2, TREG_SN, 1, | ||
254 | { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
255 | }, | ||
256 | { "bz", TILE_OPC_BZ, 0x2, 2, TREG_ZERO, 1, | ||
257 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
258 | }, | ||
259 | { "bz.sn", TILE_OPC_BZ_SN, 0x2, 2, TREG_SN, 1, | ||
260 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
261 | }, | ||
262 | { "bzt", TILE_OPC_BZT, 0x2, 2, TREG_ZERO, 1, | ||
263 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
264 | }, | ||
265 | { "bzt.sn", TILE_OPC_BZT_SN, 0x2, 2, TREG_SN, 1, | ||
266 | { { 0, }, { 10, 20 }, { 0, }, { 0, }, { 0, } }, | ||
267 | }, | ||
268 | { "clz", TILE_OPC_CLZ, 0x5, 2, TREG_ZERO, 1, | ||
269 | { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, | ||
270 | }, | ||
271 | { "clz.sn", TILE_OPC_CLZ_SN, 0x1, 2, TREG_SN, 1, | ||
272 | { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
273 | }, | ||
274 | { "crc32_32", TILE_OPC_CRC32_32, 0x1, 3, TREG_ZERO, 1, | ||
275 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
276 | }, | ||
277 | { "crc32_32.sn", TILE_OPC_CRC32_32_SN, 0x1, 3, TREG_SN, 1, | ||
278 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
279 | }, | ||
280 | { "crc32_8", TILE_OPC_CRC32_8, 0x1, 3, TREG_ZERO, 1, | ||
281 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
282 | }, | ||
283 | { "crc32_8.sn", TILE_OPC_CRC32_8_SN, 0x1, 3, TREG_SN, 1, | ||
284 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
285 | }, | ||
286 | { "ctz", TILE_OPC_CTZ, 0x5, 2, TREG_ZERO, 1, | ||
287 | { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, | ||
288 | }, | ||
289 | { "ctz.sn", TILE_OPC_CTZ_SN, 0x1, 2, TREG_SN, 1, | ||
290 | { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
291 | }, | ||
292 | { "drain", TILE_OPC_DRAIN, 0x2, 0, TREG_ZERO, 0, | ||
293 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
294 | }, | ||
295 | { "dtlbpr", TILE_OPC_DTLBPR, 0x2, 1, TREG_ZERO, 1, | ||
296 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
297 | }, | ||
298 | { "dword_align", TILE_OPC_DWORD_ALIGN, 0x1, 3, TREG_ZERO, 1, | ||
299 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
300 | }, | ||
301 | { "dword_align.sn", TILE_OPC_DWORD_ALIGN_SN, 0x1, 3, TREG_SN, 1, | ||
302 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
303 | }, | ||
304 | { "finv", TILE_OPC_FINV, 0x2, 1, TREG_ZERO, 1, | ||
305 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
306 | }, | ||
307 | { "flush", TILE_OPC_FLUSH, 0x2, 1, TREG_ZERO, 1, | ||
308 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
309 | }, | ||
310 | { "fnop", TILE_OPC_FNOP, 0xf, 0, TREG_ZERO, 1, | ||
311 | { { }, { }, { }, { }, { 0, } }, | ||
312 | }, | ||
313 | { "icoh", TILE_OPC_ICOH, 0x2, 1, TREG_ZERO, 1, | ||
314 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
315 | }, | ||
316 | { "ill", TILE_OPC_ILL, 0xa, 0, TREG_ZERO, 1, | ||
317 | { { 0, }, { }, { 0, }, { }, { 0, } }, | ||
318 | }, | ||
319 | { "inthb", TILE_OPC_INTHB, 0x3, 3, TREG_ZERO, 1, | ||
320 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
321 | }, | ||
322 | { "inthb.sn", TILE_OPC_INTHB_SN, 0x3, 3, TREG_SN, 1, | ||
323 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
324 | }, | ||
325 | { "inthh", TILE_OPC_INTHH, 0x3, 3, TREG_ZERO, 1, | ||
326 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
327 | }, | ||
328 | { "inthh.sn", TILE_OPC_INTHH_SN, 0x3, 3, TREG_SN, 1, | ||
329 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
330 | }, | ||
331 | { "intlb", TILE_OPC_INTLB, 0x3, 3, TREG_ZERO, 1, | ||
332 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
333 | }, | ||
334 | { "intlb.sn", TILE_OPC_INTLB_SN, 0x3, 3, TREG_SN, 1, | ||
335 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
336 | }, | ||
337 | { "intlh", TILE_OPC_INTLH, 0x3, 3, TREG_ZERO, 1, | ||
338 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
339 | }, | ||
340 | { "intlh.sn", TILE_OPC_INTLH_SN, 0x3, 3, TREG_SN, 1, | ||
341 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
342 | }, | ||
343 | { "inv", TILE_OPC_INV, 0x2, 1, TREG_ZERO, 1, | ||
344 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
345 | }, | ||
346 | { "iret", TILE_OPC_IRET, 0x2, 0, TREG_ZERO, 1, | ||
347 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
348 | }, | ||
349 | { "jalb", TILE_OPC_JALB, 0x2, 1, TREG_LR, 1, | ||
350 | { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, | ||
351 | }, | ||
352 | { "jalf", TILE_OPC_JALF, 0x2, 1, TREG_LR, 1, | ||
353 | { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, | ||
354 | }, | ||
355 | { "jalr", TILE_OPC_JALR, 0x2, 1, TREG_LR, 1, | ||
356 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
357 | }, | ||
358 | { "jalrp", TILE_OPC_JALRP, 0x2, 1, TREG_LR, 1, | ||
359 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
360 | }, | ||
361 | { "jb", TILE_OPC_JB, 0x2, 1, TREG_ZERO, 1, | ||
362 | { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, | ||
363 | }, | ||
364 | { "jf", TILE_OPC_JF, 0x2, 1, TREG_ZERO, 1, | ||
365 | { { 0, }, { 22 }, { 0, }, { 0, }, { 0, } }, | ||
366 | }, | ||
367 | { "jr", TILE_OPC_JR, 0x2, 1, TREG_ZERO, 1, | ||
368 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
369 | }, | ||
370 | { "jrp", TILE_OPC_JRP, 0x2, 1, TREG_ZERO, 1, | ||
371 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
372 | }, | ||
373 | { "lb", TILE_OPC_LB, 0x12, 2, TREG_ZERO, 1, | ||
374 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, | ||
375 | }, | ||
376 | { "lb.sn", TILE_OPC_LB_SN, 0x2, 2, TREG_SN, 1, | ||
377 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
378 | }, | ||
379 | { "lb_u", TILE_OPC_LB_U, 0x12, 2, TREG_ZERO, 1, | ||
380 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, | ||
381 | }, | ||
382 | { "lb_u.sn", TILE_OPC_LB_U_SN, 0x2, 2, TREG_SN, 1, | ||
383 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
384 | }, | ||
385 | { "lbadd", TILE_OPC_LBADD, 0x2, 3, TREG_ZERO, 1, | ||
386 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
387 | }, | ||
388 | { "lbadd.sn", TILE_OPC_LBADD_SN, 0x2, 3, TREG_SN, 1, | ||
389 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
390 | }, | ||
391 | { "lbadd_u", TILE_OPC_LBADD_U, 0x2, 3, TREG_ZERO, 1, | ||
392 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
393 | }, | ||
394 | { "lbadd_u.sn", TILE_OPC_LBADD_U_SN, 0x2, 3, TREG_SN, 1, | ||
395 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
396 | }, | ||
397 | { "lh", TILE_OPC_LH, 0x12, 2, TREG_ZERO, 1, | ||
398 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, | ||
399 | }, | ||
400 | { "lh.sn", TILE_OPC_LH_SN, 0x2, 2, TREG_SN, 1, | ||
401 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
402 | }, | ||
403 | { "lh_u", TILE_OPC_LH_U, 0x12, 2, TREG_ZERO, 1, | ||
404 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, | ||
405 | }, | ||
406 | { "lh_u.sn", TILE_OPC_LH_U_SN, 0x2, 2, TREG_SN, 1, | ||
407 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
408 | }, | ||
409 | { "lhadd", TILE_OPC_LHADD, 0x2, 3, TREG_ZERO, 1, | ||
410 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
411 | }, | ||
412 | { "lhadd.sn", TILE_OPC_LHADD_SN, 0x2, 3, TREG_SN, 1, | ||
413 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
414 | }, | ||
415 | { "lhadd_u", TILE_OPC_LHADD_U, 0x2, 3, TREG_ZERO, 1, | ||
416 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
417 | }, | ||
418 | { "lhadd_u.sn", TILE_OPC_LHADD_U_SN, 0x2, 3, TREG_SN, 1, | ||
419 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
420 | }, | ||
421 | { "lnk", TILE_OPC_LNK, 0x2, 1, TREG_ZERO, 1, | ||
422 | { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, | ||
423 | }, | ||
424 | { "lnk.sn", TILE_OPC_LNK_SN, 0x2, 1, TREG_SN, 1, | ||
425 | { { 0, }, { 9 }, { 0, }, { 0, }, { 0, } }, | ||
426 | }, | ||
427 | { "lw", TILE_OPC_LW, 0x12, 2, TREG_ZERO, 1, | ||
428 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 23, 15 } }, | ||
429 | }, | ||
430 | { "lw.sn", TILE_OPC_LW_SN, 0x2, 2, TREG_SN, 1, | ||
431 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
432 | }, | ||
433 | { "lw_na", TILE_OPC_LW_NA, 0x2, 2, TREG_ZERO, 1, | ||
434 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
435 | }, | ||
436 | { "lw_na.sn", TILE_OPC_LW_NA_SN, 0x2, 2, TREG_SN, 1, | ||
437 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
438 | }, | ||
439 | { "lwadd", TILE_OPC_LWADD, 0x2, 3, TREG_ZERO, 1, | ||
440 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
441 | }, | ||
442 | { "lwadd.sn", TILE_OPC_LWADD_SN, 0x2, 3, TREG_SN, 1, | ||
443 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
444 | }, | ||
445 | { "lwadd_na", TILE_OPC_LWADD_NA, 0x2, 3, TREG_ZERO, 1, | ||
446 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
447 | }, | ||
448 | { "lwadd_na.sn", TILE_OPC_LWADD_NA_SN, 0x2, 3, TREG_SN, 1, | ||
449 | { { 0, }, { 9, 24, 1 }, { 0, }, { 0, }, { 0, } }, | ||
450 | }, | ||
451 | { "maxb_u", TILE_OPC_MAXB_U, 0x3, 3, TREG_ZERO, 1, | ||
452 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
453 | }, | ||
454 | { "maxb_u.sn", TILE_OPC_MAXB_U_SN, 0x3, 3, TREG_SN, 1, | ||
455 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
456 | }, | ||
457 | { "maxh", TILE_OPC_MAXH, 0x3, 3, TREG_ZERO, 1, | ||
458 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
459 | }, | ||
460 | { "maxh.sn", TILE_OPC_MAXH_SN, 0x3, 3, TREG_SN, 1, | ||
461 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
462 | }, | ||
463 | { "maxib_u", TILE_OPC_MAXIB_U, 0x3, 3, TREG_ZERO, 1, | ||
464 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
465 | }, | ||
466 | { "maxib_u.sn", TILE_OPC_MAXIB_U_SN, 0x3, 3, TREG_SN, 1, | ||
467 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
468 | }, | ||
469 | { "maxih", TILE_OPC_MAXIH, 0x3, 3, TREG_ZERO, 1, | ||
470 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
471 | }, | ||
472 | { "maxih.sn", TILE_OPC_MAXIH_SN, 0x3, 3, TREG_SN, 1, | ||
473 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
474 | }, | ||
475 | { "mf", TILE_OPC_MF, 0x2, 0, TREG_ZERO, 1, | ||
476 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
477 | }, | ||
478 | { "mfspr", TILE_OPC_MFSPR, 0x2, 2, TREG_ZERO, 1, | ||
479 | { { 0, }, { 9, 25 }, { 0, }, { 0, }, { 0, } }, | ||
480 | }, | ||
481 | { "minb_u", TILE_OPC_MINB_U, 0x3, 3, TREG_ZERO, 1, | ||
482 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
483 | }, | ||
484 | { "minb_u.sn", TILE_OPC_MINB_U_SN, 0x3, 3, TREG_SN, 1, | ||
485 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
486 | }, | ||
487 | { "minh", TILE_OPC_MINH, 0x3, 3, TREG_ZERO, 1, | ||
488 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
489 | }, | ||
490 | { "minh.sn", TILE_OPC_MINH_SN, 0x3, 3, TREG_SN, 1, | ||
491 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
492 | }, | ||
493 | { "minib_u", TILE_OPC_MINIB_U, 0x3, 3, TREG_ZERO, 1, | ||
494 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
495 | }, | ||
496 | { "minib_u.sn", TILE_OPC_MINIB_U_SN, 0x3, 3, TREG_SN, 1, | ||
497 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
498 | }, | ||
499 | { "minih", TILE_OPC_MINIH, 0x3, 3, TREG_ZERO, 1, | ||
500 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
501 | }, | ||
502 | { "minih.sn", TILE_OPC_MINIH_SN, 0x3, 3, TREG_SN, 1, | ||
503 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
504 | }, | ||
505 | { "mm", TILE_OPC_MM, 0x3, 5, TREG_ZERO, 1, | ||
506 | { { 7, 8, 16, 26, 27 }, { 9, 10, 17, 28, 29 }, { 0, }, { 0, }, { 0, } }, | ||
507 | }, | ||
508 | { "mnz", TILE_OPC_MNZ, 0xf, 3, TREG_ZERO, 1, | ||
509 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
510 | }, | ||
511 | { "mnz.sn", TILE_OPC_MNZ_SN, 0x3, 3, TREG_SN, 1, | ||
512 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
513 | }, | ||
514 | { "mnzb", TILE_OPC_MNZB, 0x3, 3, TREG_ZERO, 1, | ||
515 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
516 | }, | ||
517 | { "mnzb.sn", TILE_OPC_MNZB_SN, 0x3, 3, TREG_SN, 1, | ||
518 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
519 | }, | ||
520 | { "mnzh", TILE_OPC_MNZH, 0x3, 3, TREG_ZERO, 1, | ||
521 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
522 | }, | ||
523 | { "mnzh.sn", TILE_OPC_MNZH_SN, 0x3, 3, TREG_SN, 1, | ||
524 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
525 | }, | ||
526 | { "mtspr", TILE_OPC_MTSPR, 0x2, 2, TREG_ZERO, 1, | ||
527 | { { 0, }, { 30, 10 }, { 0, }, { 0, }, { 0, } }, | ||
528 | }, | ||
529 | { "mulhh_ss", TILE_OPC_MULHH_SS, 0x5, 3, TREG_ZERO, 1, | ||
530 | { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, | ||
531 | }, | ||
532 | { "mulhh_ss.sn", TILE_OPC_MULHH_SS_SN, 0x1, 3, TREG_SN, 1, | ||
533 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
534 | }, | ||
535 | { "mulhh_su", TILE_OPC_MULHH_SU, 0x1, 3, TREG_ZERO, 1, | ||
536 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
537 | }, | ||
538 | { "mulhh_su.sn", TILE_OPC_MULHH_SU_SN, 0x1, 3, TREG_SN, 1, | ||
539 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
540 | }, | ||
541 | { "mulhh_uu", TILE_OPC_MULHH_UU, 0x5, 3, TREG_ZERO, 1, | ||
542 | { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, | ||
543 | }, | ||
544 | { "mulhh_uu.sn", TILE_OPC_MULHH_UU_SN, 0x1, 3, TREG_SN, 1, | ||
545 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
546 | }, | ||
547 | { "mulhha_ss", TILE_OPC_MULHHA_SS, 0x5, 3, TREG_ZERO, 1, | ||
548 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
549 | }, | ||
550 | { "mulhha_ss.sn", TILE_OPC_MULHHA_SS_SN, 0x1, 3, TREG_SN, 1, | ||
551 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
552 | }, | ||
553 | { "mulhha_su", TILE_OPC_MULHHA_SU, 0x1, 3, TREG_ZERO, 1, | ||
554 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
555 | }, | ||
556 | { "mulhha_su.sn", TILE_OPC_MULHHA_SU_SN, 0x1, 3, TREG_SN, 1, | ||
557 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
558 | }, | ||
559 | { "mulhha_uu", TILE_OPC_MULHHA_UU, 0x5, 3, TREG_ZERO, 1, | ||
560 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
561 | }, | ||
562 | { "mulhha_uu.sn", TILE_OPC_MULHHA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
563 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
564 | }, | ||
565 | { "mulhhsa_uu", TILE_OPC_MULHHSA_UU, 0x1, 3, TREG_ZERO, 1, | ||
566 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
567 | }, | ||
568 | { "mulhhsa_uu.sn", TILE_OPC_MULHHSA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
569 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
570 | }, | ||
571 | { "mulhl_ss", TILE_OPC_MULHL_SS, 0x1, 3, TREG_ZERO, 1, | ||
572 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
573 | }, | ||
574 | { "mulhl_ss.sn", TILE_OPC_MULHL_SS_SN, 0x1, 3, TREG_SN, 1, | ||
575 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
576 | }, | ||
577 | { "mulhl_su", TILE_OPC_MULHL_SU, 0x1, 3, TREG_ZERO, 1, | ||
578 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
579 | }, | ||
580 | { "mulhl_su.sn", TILE_OPC_MULHL_SU_SN, 0x1, 3, TREG_SN, 1, | ||
581 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
582 | }, | ||
583 | { "mulhl_us", TILE_OPC_MULHL_US, 0x1, 3, TREG_ZERO, 1, | ||
584 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
585 | }, | ||
586 | { "mulhl_us.sn", TILE_OPC_MULHL_US_SN, 0x1, 3, TREG_SN, 1, | ||
587 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
588 | }, | ||
589 | { "mulhl_uu", TILE_OPC_MULHL_UU, 0x1, 3, TREG_ZERO, 1, | ||
590 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
591 | }, | ||
592 | { "mulhl_uu.sn", TILE_OPC_MULHL_UU_SN, 0x1, 3, TREG_SN, 1, | ||
593 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
594 | }, | ||
595 | { "mulhla_ss", TILE_OPC_MULHLA_SS, 0x1, 3, TREG_ZERO, 1, | ||
596 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
597 | }, | ||
598 | { "mulhla_ss.sn", TILE_OPC_MULHLA_SS_SN, 0x1, 3, TREG_SN, 1, | ||
599 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
600 | }, | ||
601 | { "mulhla_su", TILE_OPC_MULHLA_SU, 0x1, 3, TREG_ZERO, 1, | ||
602 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
603 | }, | ||
604 | { "mulhla_su.sn", TILE_OPC_MULHLA_SU_SN, 0x1, 3, TREG_SN, 1, | ||
605 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
606 | }, | ||
607 | { "mulhla_us", TILE_OPC_MULHLA_US, 0x1, 3, TREG_ZERO, 1, | ||
608 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
609 | }, | ||
610 | { "mulhla_us.sn", TILE_OPC_MULHLA_US_SN, 0x1, 3, TREG_SN, 1, | ||
611 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
612 | }, | ||
613 | { "mulhla_uu", TILE_OPC_MULHLA_UU, 0x1, 3, TREG_ZERO, 1, | ||
614 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
615 | }, | ||
616 | { "mulhla_uu.sn", TILE_OPC_MULHLA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
617 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
618 | }, | ||
619 | { "mulhlsa_uu", TILE_OPC_MULHLSA_UU, 0x5, 3, TREG_ZERO, 1, | ||
620 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
621 | }, | ||
622 | { "mulhlsa_uu.sn", TILE_OPC_MULHLSA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
623 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
624 | }, | ||
625 | { "mulll_ss", TILE_OPC_MULLL_SS, 0x5, 3, TREG_ZERO, 1, | ||
626 | { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, | ||
627 | }, | ||
628 | { "mulll_ss.sn", TILE_OPC_MULLL_SS_SN, 0x1, 3, TREG_SN, 1, | ||
629 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
630 | }, | ||
631 | { "mulll_su", TILE_OPC_MULLL_SU, 0x1, 3, TREG_ZERO, 1, | ||
632 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
633 | }, | ||
634 | { "mulll_su.sn", TILE_OPC_MULLL_SU_SN, 0x1, 3, TREG_SN, 1, | ||
635 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
636 | }, | ||
637 | { "mulll_uu", TILE_OPC_MULLL_UU, 0x5, 3, TREG_ZERO, 1, | ||
638 | { { 7, 8, 16 }, { 0, }, { 11, 12, 18 }, { 0, }, { 0, } }, | ||
639 | }, | ||
640 | { "mulll_uu.sn", TILE_OPC_MULLL_UU_SN, 0x1, 3, TREG_SN, 1, | ||
641 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
642 | }, | ||
643 | { "mullla_ss", TILE_OPC_MULLLA_SS, 0x5, 3, TREG_ZERO, 1, | ||
644 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
645 | }, | ||
646 | { "mullla_ss.sn", TILE_OPC_MULLLA_SS_SN, 0x1, 3, TREG_SN, 1, | ||
647 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
648 | }, | ||
649 | { "mullla_su", TILE_OPC_MULLLA_SU, 0x1, 3, TREG_ZERO, 1, | ||
650 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
651 | }, | ||
652 | { "mullla_su.sn", TILE_OPC_MULLLA_SU_SN, 0x1, 3, TREG_SN, 1, | ||
653 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
654 | }, | ||
655 | { "mullla_uu", TILE_OPC_MULLLA_UU, 0x5, 3, TREG_ZERO, 1, | ||
656 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
657 | }, | ||
658 | { "mullla_uu.sn", TILE_OPC_MULLLA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
659 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
660 | }, | ||
661 | { "mulllsa_uu", TILE_OPC_MULLLSA_UU, 0x1, 3, TREG_ZERO, 1, | ||
662 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
663 | }, | ||
664 | { "mulllsa_uu.sn", TILE_OPC_MULLLSA_UU_SN, 0x1, 3, TREG_SN, 1, | ||
665 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
666 | }, | ||
667 | { "mvnz", TILE_OPC_MVNZ, 0x5, 3, TREG_ZERO, 1, | ||
668 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
669 | }, | ||
670 | { "mvnz.sn", TILE_OPC_MVNZ_SN, 0x1, 3, TREG_SN, 1, | ||
671 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
672 | }, | ||
673 | { "mvz", TILE_OPC_MVZ, 0x5, 3, TREG_ZERO, 1, | ||
674 | { { 21, 8, 16 }, { 0, }, { 31, 12, 18 }, { 0, }, { 0, } }, | ||
675 | }, | ||
676 | { "mvz.sn", TILE_OPC_MVZ_SN, 0x1, 3, TREG_SN, 1, | ||
677 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
678 | }, | ||
679 | { "mz", TILE_OPC_MZ, 0xf, 3, TREG_ZERO, 1, | ||
680 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
681 | }, | ||
682 | { "mz.sn", TILE_OPC_MZ_SN, 0x3, 3, TREG_SN, 1, | ||
683 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
684 | }, | ||
685 | { "mzb", TILE_OPC_MZB, 0x3, 3, TREG_ZERO, 1, | ||
686 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
687 | }, | ||
688 | { "mzb.sn", TILE_OPC_MZB_SN, 0x3, 3, TREG_SN, 1, | ||
689 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
690 | }, | ||
691 | { "mzh", TILE_OPC_MZH, 0x3, 3, TREG_ZERO, 1, | ||
692 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
693 | }, | ||
694 | { "mzh.sn", TILE_OPC_MZH_SN, 0x3, 3, TREG_SN, 1, | ||
695 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
696 | }, | ||
697 | { "nap", TILE_OPC_NAP, 0x2, 0, TREG_ZERO, 0, | ||
698 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
699 | }, | ||
700 | { "nop", TILE_OPC_NOP, 0xf, 0, TREG_ZERO, 1, | ||
701 | { { }, { }, { }, { }, { 0, } }, | ||
702 | }, | ||
703 | { "nor", TILE_OPC_NOR, 0xf, 3, TREG_ZERO, 1, | ||
704 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
705 | }, | ||
706 | { "nor.sn", TILE_OPC_NOR_SN, 0x3, 3, TREG_SN, 1, | ||
707 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
708 | }, | ||
709 | { "or", TILE_OPC_OR, 0xf, 3, TREG_ZERO, 1, | ||
710 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
711 | }, | ||
712 | { "or.sn", TILE_OPC_OR_SN, 0x3, 3, TREG_SN, 1, | ||
713 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
714 | }, | ||
715 | { "ori", TILE_OPC_ORI, 0xf, 3, TREG_ZERO, 1, | ||
716 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
717 | }, | ||
718 | { "ori.sn", TILE_OPC_ORI_SN, 0x3, 3, TREG_SN, 1, | ||
719 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
720 | }, | ||
721 | { "packbs_u", TILE_OPC_PACKBS_U, 0x3, 3, TREG_ZERO, 1, | ||
722 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
723 | }, | ||
724 | { "packbs_u.sn", TILE_OPC_PACKBS_U_SN, 0x3, 3, TREG_SN, 1, | ||
725 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
726 | }, | ||
727 | { "packhb", TILE_OPC_PACKHB, 0x3, 3, TREG_ZERO, 1, | ||
728 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
729 | }, | ||
730 | { "packhb.sn", TILE_OPC_PACKHB_SN, 0x3, 3, TREG_SN, 1, | ||
731 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
732 | }, | ||
733 | { "packhs", TILE_OPC_PACKHS, 0x3, 3, TREG_ZERO, 1, | ||
734 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
735 | }, | ||
736 | { "packhs.sn", TILE_OPC_PACKHS_SN, 0x3, 3, TREG_SN, 1, | ||
737 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
738 | }, | ||
739 | { "packlb", TILE_OPC_PACKLB, 0x3, 3, TREG_ZERO, 1, | ||
740 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
741 | }, | ||
742 | { "packlb.sn", TILE_OPC_PACKLB_SN, 0x3, 3, TREG_SN, 1, | ||
743 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
744 | }, | ||
745 | { "pcnt", TILE_OPC_PCNT, 0x5, 2, TREG_ZERO, 1, | ||
746 | { { 7, 8 }, { 0, }, { 11, 12 }, { 0, }, { 0, } }, | ||
747 | }, | ||
748 | { "pcnt.sn", TILE_OPC_PCNT_SN, 0x1, 2, TREG_SN, 1, | ||
749 | { { 7, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
750 | }, | ||
751 | { "rl", TILE_OPC_RL, 0xf, 3, TREG_ZERO, 1, | ||
752 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
753 | }, | ||
754 | { "rl.sn", TILE_OPC_RL_SN, 0x3, 3, TREG_SN, 1, | ||
755 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
756 | }, | ||
757 | { "rli", TILE_OPC_RLI, 0xf, 3, TREG_ZERO, 1, | ||
758 | { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, | ||
759 | }, | ||
760 | { "rli.sn", TILE_OPC_RLI_SN, 0x3, 3, TREG_SN, 1, | ||
761 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
762 | }, | ||
763 | { "s1a", TILE_OPC_S1A, 0xf, 3, TREG_ZERO, 1, | ||
764 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
765 | }, | ||
766 | { "s1a.sn", TILE_OPC_S1A_SN, 0x3, 3, TREG_SN, 1, | ||
767 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
768 | }, | ||
769 | { "s2a", TILE_OPC_S2A, 0xf, 3, TREG_ZERO, 1, | ||
770 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
771 | }, | ||
772 | { "s2a.sn", TILE_OPC_S2A_SN, 0x3, 3, TREG_SN, 1, | ||
773 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
774 | }, | ||
775 | { "s3a", TILE_OPC_S3A, 0xf, 3, TREG_ZERO, 1, | ||
776 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
777 | }, | ||
778 | { "s3a.sn", TILE_OPC_S3A_SN, 0x3, 3, TREG_SN, 1, | ||
779 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
780 | }, | ||
781 | { "sadab_u", TILE_OPC_SADAB_U, 0x1, 3, TREG_ZERO, 1, | ||
782 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
783 | }, | ||
784 | { "sadab_u.sn", TILE_OPC_SADAB_U_SN, 0x1, 3, TREG_SN, 1, | ||
785 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
786 | }, | ||
787 | { "sadah", TILE_OPC_SADAH, 0x1, 3, TREG_ZERO, 1, | ||
788 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
789 | }, | ||
790 | { "sadah.sn", TILE_OPC_SADAH_SN, 0x1, 3, TREG_SN, 1, | ||
791 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
792 | }, | ||
793 | { "sadah_u", TILE_OPC_SADAH_U, 0x1, 3, TREG_ZERO, 1, | ||
794 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
795 | }, | ||
796 | { "sadah_u.sn", TILE_OPC_SADAH_U_SN, 0x1, 3, TREG_SN, 1, | ||
797 | { { 21, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
798 | }, | ||
799 | { "sadb_u", TILE_OPC_SADB_U, 0x1, 3, TREG_ZERO, 1, | ||
800 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
801 | }, | ||
802 | { "sadb_u.sn", TILE_OPC_SADB_U_SN, 0x1, 3, TREG_SN, 1, | ||
803 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
804 | }, | ||
805 | { "sadh", TILE_OPC_SADH, 0x1, 3, TREG_ZERO, 1, | ||
806 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
807 | }, | ||
808 | { "sadh.sn", TILE_OPC_SADH_SN, 0x1, 3, TREG_SN, 1, | ||
809 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
810 | }, | ||
811 | { "sadh_u", TILE_OPC_SADH_U, 0x1, 3, TREG_ZERO, 1, | ||
812 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
813 | }, | ||
814 | { "sadh_u.sn", TILE_OPC_SADH_U_SN, 0x1, 3, TREG_SN, 1, | ||
815 | { { 7, 8, 16 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
816 | }, | ||
817 | { "sb", TILE_OPC_SB, 0x12, 2, TREG_ZERO, 1, | ||
818 | { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, | ||
819 | }, | ||
820 | { "sbadd", TILE_OPC_SBADD, 0x2, 3, TREG_ZERO, 1, | ||
821 | { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, | ||
822 | }, | ||
823 | { "seq", TILE_OPC_SEQ, 0xf, 3, TREG_ZERO, 1, | ||
824 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
825 | }, | ||
826 | { "seq.sn", TILE_OPC_SEQ_SN, 0x3, 3, TREG_SN, 1, | ||
827 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
828 | }, | ||
829 | { "seqb", TILE_OPC_SEQB, 0x3, 3, TREG_ZERO, 1, | ||
830 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
831 | }, | ||
832 | { "seqb.sn", TILE_OPC_SEQB_SN, 0x3, 3, TREG_SN, 1, | ||
833 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
834 | }, | ||
835 | { "seqh", TILE_OPC_SEQH, 0x3, 3, TREG_ZERO, 1, | ||
836 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
837 | }, | ||
838 | { "seqh.sn", TILE_OPC_SEQH_SN, 0x3, 3, TREG_SN, 1, | ||
839 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
840 | }, | ||
841 | { "seqi", TILE_OPC_SEQI, 0xf, 3, TREG_ZERO, 1, | ||
842 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
843 | }, | ||
844 | { "seqi.sn", TILE_OPC_SEQI_SN, 0x3, 3, TREG_SN, 1, | ||
845 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
846 | }, | ||
847 | { "seqib", TILE_OPC_SEQIB, 0x3, 3, TREG_ZERO, 1, | ||
848 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
849 | }, | ||
850 | { "seqib.sn", TILE_OPC_SEQIB_SN, 0x3, 3, TREG_SN, 1, | ||
851 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
852 | }, | ||
853 | { "seqih", TILE_OPC_SEQIH, 0x3, 3, TREG_ZERO, 1, | ||
854 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
855 | }, | ||
856 | { "seqih.sn", TILE_OPC_SEQIH_SN, 0x3, 3, TREG_SN, 1, | ||
857 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
858 | }, | ||
859 | { "sh", TILE_OPC_SH, 0x12, 2, TREG_ZERO, 1, | ||
860 | { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, | ||
861 | }, | ||
862 | { "shadd", TILE_OPC_SHADD, 0x2, 3, TREG_ZERO, 1, | ||
863 | { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, | ||
864 | }, | ||
865 | { "shl", TILE_OPC_SHL, 0xf, 3, TREG_ZERO, 1, | ||
866 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
867 | }, | ||
868 | { "shl.sn", TILE_OPC_SHL_SN, 0x3, 3, TREG_SN, 1, | ||
869 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
870 | }, | ||
871 | { "shlb", TILE_OPC_SHLB, 0x3, 3, TREG_ZERO, 1, | ||
872 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
873 | }, | ||
874 | { "shlb.sn", TILE_OPC_SHLB_SN, 0x3, 3, TREG_SN, 1, | ||
875 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
876 | }, | ||
877 | { "shlh", TILE_OPC_SHLH, 0x3, 3, TREG_ZERO, 1, | ||
878 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
879 | }, | ||
880 | { "shlh.sn", TILE_OPC_SHLH_SN, 0x3, 3, TREG_SN, 1, | ||
881 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
882 | }, | ||
883 | { "shli", TILE_OPC_SHLI, 0xf, 3, TREG_ZERO, 1, | ||
884 | { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, | ||
885 | }, | ||
886 | { "shli.sn", TILE_OPC_SHLI_SN, 0x3, 3, TREG_SN, 1, | ||
887 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
888 | }, | ||
889 | { "shlib", TILE_OPC_SHLIB, 0x3, 3, TREG_ZERO, 1, | ||
890 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
891 | }, | ||
892 | { "shlib.sn", TILE_OPC_SHLIB_SN, 0x3, 3, TREG_SN, 1, | ||
893 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
894 | }, | ||
895 | { "shlih", TILE_OPC_SHLIH, 0x3, 3, TREG_ZERO, 1, | ||
896 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
897 | }, | ||
898 | { "shlih.sn", TILE_OPC_SHLIH_SN, 0x3, 3, TREG_SN, 1, | ||
899 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
900 | }, | ||
901 | { "shr", TILE_OPC_SHR, 0xf, 3, TREG_ZERO, 1, | ||
902 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
903 | }, | ||
904 | { "shr.sn", TILE_OPC_SHR_SN, 0x3, 3, TREG_SN, 1, | ||
905 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
906 | }, | ||
907 | { "shrb", TILE_OPC_SHRB, 0x3, 3, TREG_ZERO, 1, | ||
908 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
909 | }, | ||
910 | { "shrb.sn", TILE_OPC_SHRB_SN, 0x3, 3, TREG_SN, 1, | ||
911 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
912 | }, | ||
913 | { "shrh", TILE_OPC_SHRH, 0x3, 3, TREG_ZERO, 1, | ||
914 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
915 | }, | ||
916 | { "shrh.sn", TILE_OPC_SHRH_SN, 0x3, 3, TREG_SN, 1, | ||
917 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
918 | }, | ||
919 | { "shri", TILE_OPC_SHRI, 0xf, 3, TREG_ZERO, 1, | ||
920 | { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, | ||
921 | }, | ||
922 | { "shri.sn", TILE_OPC_SHRI_SN, 0x3, 3, TREG_SN, 1, | ||
923 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
924 | }, | ||
925 | { "shrib", TILE_OPC_SHRIB, 0x3, 3, TREG_ZERO, 1, | ||
926 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
927 | }, | ||
928 | { "shrib.sn", TILE_OPC_SHRIB_SN, 0x3, 3, TREG_SN, 1, | ||
929 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
930 | }, | ||
931 | { "shrih", TILE_OPC_SHRIH, 0x3, 3, TREG_ZERO, 1, | ||
932 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
933 | }, | ||
934 | { "shrih.sn", TILE_OPC_SHRIH_SN, 0x3, 3, TREG_SN, 1, | ||
935 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
936 | }, | ||
937 | { "slt", TILE_OPC_SLT, 0xf, 3, TREG_ZERO, 1, | ||
938 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
939 | }, | ||
940 | { "slt.sn", TILE_OPC_SLT_SN, 0x3, 3, TREG_SN, 1, | ||
941 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
942 | }, | ||
943 | { "slt_u", TILE_OPC_SLT_U, 0xf, 3, TREG_ZERO, 1, | ||
944 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
945 | }, | ||
946 | { "slt_u.sn", TILE_OPC_SLT_U_SN, 0x3, 3, TREG_SN, 1, | ||
947 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
948 | }, | ||
949 | { "sltb", TILE_OPC_SLTB, 0x3, 3, TREG_ZERO, 1, | ||
950 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
951 | }, | ||
952 | { "sltb.sn", TILE_OPC_SLTB_SN, 0x3, 3, TREG_SN, 1, | ||
953 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
954 | }, | ||
955 | { "sltb_u", TILE_OPC_SLTB_U, 0x3, 3, TREG_ZERO, 1, | ||
956 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
957 | }, | ||
958 | { "sltb_u.sn", TILE_OPC_SLTB_U_SN, 0x3, 3, TREG_SN, 1, | ||
959 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
960 | }, | ||
961 | { "slte", TILE_OPC_SLTE, 0xf, 3, TREG_ZERO, 1, | ||
962 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
963 | }, | ||
964 | { "slte.sn", TILE_OPC_SLTE_SN, 0x3, 3, TREG_SN, 1, | ||
965 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
966 | }, | ||
967 | { "slte_u", TILE_OPC_SLTE_U, 0xf, 3, TREG_ZERO, 1, | ||
968 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
969 | }, | ||
970 | { "slte_u.sn", TILE_OPC_SLTE_U_SN, 0x3, 3, TREG_SN, 1, | ||
971 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
972 | }, | ||
973 | { "slteb", TILE_OPC_SLTEB, 0x3, 3, TREG_ZERO, 1, | ||
974 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
975 | }, | ||
976 | { "slteb.sn", TILE_OPC_SLTEB_SN, 0x3, 3, TREG_SN, 1, | ||
977 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
978 | }, | ||
979 | { "slteb_u", TILE_OPC_SLTEB_U, 0x3, 3, TREG_ZERO, 1, | ||
980 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
981 | }, | ||
982 | { "slteb_u.sn", TILE_OPC_SLTEB_U_SN, 0x3, 3, TREG_SN, 1, | ||
983 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
984 | }, | ||
985 | { "slteh", TILE_OPC_SLTEH, 0x3, 3, TREG_ZERO, 1, | ||
986 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
987 | }, | ||
988 | { "slteh.sn", TILE_OPC_SLTEH_SN, 0x3, 3, TREG_SN, 1, | ||
989 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
990 | }, | ||
991 | { "slteh_u", TILE_OPC_SLTEH_U, 0x3, 3, TREG_ZERO, 1, | ||
992 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
993 | }, | ||
994 | { "slteh_u.sn", TILE_OPC_SLTEH_U_SN, 0x3, 3, TREG_SN, 1, | ||
995 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
996 | }, | ||
997 | { "slth", TILE_OPC_SLTH, 0x3, 3, TREG_ZERO, 1, | ||
998 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
999 | }, | ||
1000 | { "slth.sn", TILE_OPC_SLTH_SN, 0x3, 3, TREG_SN, 1, | ||
1001 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1002 | }, | ||
1003 | { "slth_u", TILE_OPC_SLTH_U, 0x3, 3, TREG_ZERO, 1, | ||
1004 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1005 | }, | ||
1006 | { "slth_u.sn", TILE_OPC_SLTH_U_SN, 0x3, 3, TREG_SN, 1, | ||
1007 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1008 | }, | ||
1009 | { "slti", TILE_OPC_SLTI, 0xf, 3, TREG_ZERO, 1, | ||
1010 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
1011 | }, | ||
1012 | { "slti.sn", TILE_OPC_SLTI_SN, 0x3, 3, TREG_SN, 1, | ||
1013 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1014 | }, | ||
1015 | { "slti_u", TILE_OPC_SLTI_U, 0xf, 3, TREG_ZERO, 1, | ||
1016 | { { 7, 8, 0 }, { 9, 10, 1 }, { 11, 12, 2 }, { 13, 14, 3 }, { 0, } }, | ||
1017 | }, | ||
1018 | { "slti_u.sn", TILE_OPC_SLTI_U_SN, 0x3, 3, TREG_SN, 1, | ||
1019 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1020 | }, | ||
1021 | { "sltib", TILE_OPC_SLTIB, 0x3, 3, TREG_ZERO, 1, | ||
1022 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1023 | }, | ||
1024 | { "sltib.sn", TILE_OPC_SLTIB_SN, 0x3, 3, TREG_SN, 1, | ||
1025 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1026 | }, | ||
1027 | { "sltib_u", TILE_OPC_SLTIB_U, 0x3, 3, TREG_ZERO, 1, | ||
1028 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1029 | }, | ||
1030 | { "sltib_u.sn", TILE_OPC_SLTIB_U_SN, 0x3, 3, TREG_SN, 1, | ||
1031 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1032 | }, | ||
1033 | { "sltih", TILE_OPC_SLTIH, 0x3, 3, TREG_ZERO, 1, | ||
1034 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1035 | }, | ||
1036 | { "sltih.sn", TILE_OPC_SLTIH_SN, 0x3, 3, TREG_SN, 1, | ||
1037 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1038 | }, | ||
1039 | { "sltih_u", TILE_OPC_SLTIH_U, 0x3, 3, TREG_ZERO, 1, | ||
1040 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1041 | }, | ||
1042 | { "sltih_u.sn", TILE_OPC_SLTIH_U_SN, 0x3, 3, TREG_SN, 1, | ||
1043 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1044 | }, | ||
1045 | { "sne", TILE_OPC_SNE, 0xf, 3, TREG_ZERO, 1, | ||
1046 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
1047 | }, | ||
1048 | { "sne.sn", TILE_OPC_SNE_SN, 0x3, 3, TREG_SN, 1, | ||
1049 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1050 | }, | ||
1051 | { "sneb", TILE_OPC_SNEB, 0x3, 3, TREG_ZERO, 1, | ||
1052 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1053 | }, | ||
1054 | { "sneb.sn", TILE_OPC_SNEB_SN, 0x3, 3, TREG_SN, 1, | ||
1055 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1056 | }, | ||
1057 | { "sneh", TILE_OPC_SNEH, 0x3, 3, TREG_ZERO, 1, | ||
1058 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1059 | }, | ||
1060 | { "sneh.sn", TILE_OPC_SNEH_SN, 0x3, 3, TREG_SN, 1, | ||
1061 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1062 | }, | ||
1063 | { "sra", TILE_OPC_SRA, 0xf, 3, TREG_ZERO, 1, | ||
1064 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
1065 | }, | ||
1066 | { "sra.sn", TILE_OPC_SRA_SN, 0x3, 3, TREG_SN, 1, | ||
1067 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1068 | }, | ||
1069 | { "srab", TILE_OPC_SRAB, 0x3, 3, TREG_ZERO, 1, | ||
1070 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1071 | }, | ||
1072 | { "srab.sn", TILE_OPC_SRAB_SN, 0x3, 3, TREG_SN, 1, | ||
1073 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1074 | }, | ||
1075 | { "srah", TILE_OPC_SRAH, 0x3, 3, TREG_ZERO, 1, | ||
1076 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1077 | }, | ||
1078 | { "srah.sn", TILE_OPC_SRAH_SN, 0x3, 3, TREG_SN, 1, | ||
1079 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1080 | }, | ||
1081 | { "srai", TILE_OPC_SRAI, 0xf, 3, TREG_ZERO, 1, | ||
1082 | { { 7, 8, 32 }, { 9, 10, 33 }, { 11, 12, 34 }, { 13, 14, 35 }, { 0, } }, | ||
1083 | }, | ||
1084 | { "srai.sn", TILE_OPC_SRAI_SN, 0x3, 3, TREG_SN, 1, | ||
1085 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
1086 | }, | ||
1087 | { "sraib", TILE_OPC_SRAIB, 0x3, 3, TREG_ZERO, 1, | ||
1088 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
1089 | }, | ||
1090 | { "sraib.sn", TILE_OPC_SRAIB_SN, 0x3, 3, TREG_SN, 1, | ||
1091 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
1092 | }, | ||
1093 | { "sraih", TILE_OPC_SRAIH, 0x3, 3, TREG_ZERO, 1, | ||
1094 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
1095 | }, | ||
1096 | { "sraih.sn", TILE_OPC_SRAIH_SN, 0x3, 3, TREG_SN, 1, | ||
1097 | { { 7, 8, 32 }, { 9, 10, 33 }, { 0, }, { 0, }, { 0, } }, | ||
1098 | }, | ||
1099 | { "sub", TILE_OPC_SUB, 0xf, 3, TREG_ZERO, 1, | ||
1100 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
1101 | }, | ||
1102 | { "sub.sn", TILE_OPC_SUB_SN, 0x3, 3, TREG_SN, 1, | ||
1103 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1104 | }, | ||
1105 | { "subb", TILE_OPC_SUBB, 0x3, 3, TREG_ZERO, 1, | ||
1106 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1107 | }, | ||
1108 | { "subb.sn", TILE_OPC_SUBB_SN, 0x3, 3, TREG_SN, 1, | ||
1109 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1110 | }, | ||
1111 | { "subbs_u", TILE_OPC_SUBBS_U, 0x3, 3, TREG_ZERO, 1, | ||
1112 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1113 | }, | ||
1114 | { "subbs_u.sn", TILE_OPC_SUBBS_U_SN, 0x3, 3, TREG_SN, 1, | ||
1115 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1116 | }, | ||
1117 | { "subh", TILE_OPC_SUBH, 0x3, 3, TREG_ZERO, 1, | ||
1118 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1119 | }, | ||
1120 | { "subh.sn", TILE_OPC_SUBH_SN, 0x3, 3, TREG_SN, 1, | ||
1121 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1122 | }, | ||
1123 | { "subhs", TILE_OPC_SUBHS, 0x3, 3, TREG_ZERO, 1, | ||
1124 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1125 | }, | ||
1126 | { "subhs.sn", TILE_OPC_SUBHS_SN, 0x3, 3, TREG_SN, 1, | ||
1127 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1128 | }, | ||
1129 | { "subs", TILE_OPC_SUBS, 0x3, 3, TREG_ZERO, 1, | ||
1130 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1131 | }, | ||
1132 | { "subs.sn", TILE_OPC_SUBS_SN, 0x3, 3, TREG_SN, 1, | ||
1133 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1134 | }, | ||
1135 | { "sw", TILE_OPC_SW, 0x12, 2, TREG_ZERO, 1, | ||
1136 | { { 0, }, { 10, 17 }, { 0, }, { 0, }, { 15, 36 } }, | ||
1137 | }, | ||
1138 | { "swadd", TILE_OPC_SWADD, 0x2, 3, TREG_ZERO, 1, | ||
1139 | { { 0, }, { 24, 17, 37 }, { 0, }, { 0, }, { 0, } }, | ||
1140 | }, | ||
1141 | { "swint0", TILE_OPC_SWINT0, 0x2, 0, TREG_ZERO, 0, | ||
1142 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
1143 | }, | ||
1144 | { "swint1", TILE_OPC_SWINT1, 0x2, 0, TREG_ZERO, 0, | ||
1145 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
1146 | }, | ||
1147 | { "swint2", TILE_OPC_SWINT2, 0x2, 0, TREG_ZERO, 0, | ||
1148 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
1149 | }, | ||
1150 | { "swint3", TILE_OPC_SWINT3, 0x2, 0, TREG_ZERO, 0, | ||
1151 | { { 0, }, { }, { 0, }, { 0, }, { 0, } }, | ||
1152 | }, | ||
1153 | { "tblidxb0", TILE_OPC_TBLIDXB0, 0x5, 2, TREG_ZERO, 1, | ||
1154 | { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, | ||
1155 | }, | ||
1156 | { "tblidxb0.sn", TILE_OPC_TBLIDXB0_SN, 0x1, 2, TREG_SN, 1, | ||
1157 | { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
1158 | }, | ||
1159 | { "tblidxb1", TILE_OPC_TBLIDXB1, 0x5, 2, TREG_ZERO, 1, | ||
1160 | { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, | ||
1161 | }, | ||
1162 | { "tblidxb1.sn", TILE_OPC_TBLIDXB1_SN, 0x1, 2, TREG_SN, 1, | ||
1163 | { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
1164 | }, | ||
1165 | { "tblidxb2", TILE_OPC_TBLIDXB2, 0x5, 2, TREG_ZERO, 1, | ||
1166 | { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, | ||
1167 | }, | ||
1168 | { "tblidxb2.sn", TILE_OPC_TBLIDXB2_SN, 0x1, 2, TREG_SN, 1, | ||
1169 | { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
1170 | }, | ||
1171 | { "tblidxb3", TILE_OPC_TBLIDXB3, 0x5, 2, TREG_ZERO, 1, | ||
1172 | { { 21, 8 }, { 0, }, { 31, 12 }, { 0, }, { 0, } }, | ||
1173 | }, | ||
1174 | { "tblidxb3.sn", TILE_OPC_TBLIDXB3_SN, 0x1, 2, TREG_SN, 1, | ||
1175 | { { 21, 8 }, { 0, }, { 0, }, { 0, }, { 0, } }, | ||
1176 | }, | ||
1177 | { "tns", TILE_OPC_TNS, 0x2, 2, TREG_ZERO, 1, | ||
1178 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
1179 | }, | ||
1180 | { "tns.sn", TILE_OPC_TNS_SN, 0x2, 2, TREG_SN, 1, | ||
1181 | { { 0, }, { 9, 10 }, { 0, }, { 0, }, { 0, } }, | ||
1182 | }, | ||
1183 | { "wh64", TILE_OPC_WH64, 0x2, 1, TREG_ZERO, 1, | ||
1184 | { { 0, }, { 10 }, { 0, }, { 0, }, { 0, } }, | ||
1185 | }, | ||
1186 | { "xor", TILE_OPC_XOR, 0xf, 3, TREG_ZERO, 1, | ||
1187 | { { 7, 8, 16 }, { 9, 10, 17 }, { 11, 12, 18 }, { 13, 14, 19 }, { 0, } }, | ||
1188 | }, | ||
1189 | { "xor.sn", TILE_OPC_XOR_SN, 0x3, 3, TREG_SN, 1, | ||
1190 | { { 7, 8, 16 }, { 9, 10, 17 }, { 0, }, { 0, }, { 0, } }, | ||
1191 | }, | ||
1192 | { "xori", TILE_OPC_XORI, 0x3, 3, TREG_ZERO, 1, | ||
1193 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1194 | }, | ||
1195 | { "xori.sn", TILE_OPC_XORI_SN, 0x3, 3, TREG_SN, 1, | ||
1196 | { { 7, 8, 0 }, { 9, 10, 1 }, { 0, }, { 0, }, { 0, } }, | ||
1197 | }, | ||
1198 | { NULL, TILE_OPC_NONE, 0, 0, TREG_ZERO, 0, { { 0, } }, | ||
1199 | } | ||
1200 | }; | ||
/*
 * Encode one bitfield descriptor as a single decode-FSM table entry:
 * the low 6 bits hold `start` (the starting bit position) and the bits
 * above that hold a mask of `size` one-bits, i.e. ((1 << size) - 1).
 * NOTE(review): presumably the decoder uses this to select which bits
 * of the instruction word to examine for the next FSM transition —
 * confirm against the consumer of decode_X0_fsm[] and friends.
 */
#define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6))
/*
 * Encode a link to another node of the decode FSM: the child's array
 * index is biased by TILE_OPC_NONE so that, in a table slot, values
 * above the opcode range can be distinguished from terminal opcodes.
 * NOTE(review): assumes TILE_OPC_NONE is the highest opcode enumerator
 * — verify in the generated opcode enum, which is outside this chunk.
 */
#define CHILD(array_index) (TILE_OPC_NONE + (array_index))
1203 | |||
1204 | static const unsigned short decode_X0_fsm[1153] = | ||
1205 | { | ||
1206 | BITFIELD(22, 9) /* index 0 */, | ||
1207 | CHILD(513), CHILD(530), CHILD(547), CHILD(564), CHILD(596), CHILD(613), | ||
1208 | CHILD(630), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1209 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1210 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1211 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1212 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1213 | TILE_OPC_NONE, CHILD(663), CHILD(680), CHILD(697), CHILD(714), CHILD(746), | ||
1214 | CHILD(763), CHILD(780), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1215 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1216 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1217 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1218 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1219 | TILE_OPC_NONE, TILE_OPC_NONE, CHILD(813), CHILD(813), CHILD(813), | ||
1220 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1221 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1222 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1223 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1224 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1225 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1226 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1227 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1228 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1229 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
1230 | CHILD(813), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1231 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1232 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1233 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1234 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1235 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1236 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1237 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1238 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1239 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
1240 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(843), | ||
1241 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1242 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1243 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1244 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1245 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1246 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1247 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1248 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1249 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1250 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1251 | CHILD(843), CHILD(843), CHILD(843), CHILD(873), CHILD(878), CHILD(883), | ||
1252 | CHILD(903), CHILD(908), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1253 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1254 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1255 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1256 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1257 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(913), | ||
1258 | CHILD(918), CHILD(923), CHILD(943), CHILD(948), TILE_OPC_NONE, | ||
1259 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1260 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1261 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1262 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1263 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1264 | TILE_OPC_NONE, CHILD(953), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1265 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1266 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1267 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1268 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1269 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1270 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(988), TILE_OPC_NONE, | ||
1271 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1272 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1273 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1274 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1275 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1276 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1277 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1278 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1279 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1280 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1281 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1282 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1283 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1284 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1285 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1286 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1287 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1288 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1289 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, CHILD(993), | ||
1290 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1291 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1292 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1293 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1294 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1295 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1296 | TILE_OPC_NONE, CHILD(1076), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1297 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1298 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1299 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1300 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1301 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1302 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1303 | BITFIELD(18, 4) /* index 513 */, | ||
1304 | TILE_OPC_NONE, TILE_OPC_ADDB, TILE_OPC_ADDH, TILE_OPC_ADD, | ||
1305 | TILE_OPC_ADIFFB_U, TILE_OPC_ADIFFH, TILE_OPC_AND, TILE_OPC_AVGB_U, | ||
1306 | TILE_OPC_AVGH, TILE_OPC_CRC32_32, TILE_OPC_CRC32_8, TILE_OPC_INTHB, | ||
1307 | TILE_OPC_INTHH, TILE_OPC_INTLB, TILE_OPC_INTLH, TILE_OPC_MAXB_U, | ||
1308 | BITFIELD(18, 4) /* index 530 */, | ||
1309 | TILE_OPC_MAXH, TILE_OPC_MINB_U, TILE_OPC_MINH, TILE_OPC_MNZB, TILE_OPC_MNZH, | ||
1310 | TILE_OPC_MNZ, TILE_OPC_MULHHA_SS, TILE_OPC_MULHHA_SU, TILE_OPC_MULHHA_UU, | ||
1311 | TILE_OPC_MULHHSA_UU, TILE_OPC_MULHH_SS, TILE_OPC_MULHH_SU, | ||
1312 | TILE_OPC_MULHH_UU, TILE_OPC_MULHLA_SS, TILE_OPC_MULHLA_SU, | ||
1313 | TILE_OPC_MULHLA_US, | ||
1314 | BITFIELD(18, 4) /* index 547 */, | ||
1315 | TILE_OPC_MULHLA_UU, TILE_OPC_MULHLSA_UU, TILE_OPC_MULHL_SS, | ||
1316 | TILE_OPC_MULHL_SU, TILE_OPC_MULHL_US, TILE_OPC_MULHL_UU, TILE_OPC_MULLLA_SS, | ||
1317 | TILE_OPC_MULLLA_SU, TILE_OPC_MULLLA_UU, TILE_OPC_MULLLSA_UU, | ||
1318 | TILE_OPC_MULLL_SS, TILE_OPC_MULLL_SU, TILE_OPC_MULLL_UU, TILE_OPC_MVNZ, | ||
1319 | TILE_OPC_MVZ, TILE_OPC_MZB, | ||
1320 | BITFIELD(18, 4) /* index 564 */, | ||
1321 | TILE_OPC_MZH, TILE_OPC_MZ, TILE_OPC_NOR, CHILD(581), TILE_OPC_PACKHB, | ||
1322 | TILE_OPC_PACKLB, TILE_OPC_RL, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_S3A, | ||
1323 | TILE_OPC_SADAB_U, TILE_OPC_SADAH, TILE_OPC_SADAH_U, TILE_OPC_SADB_U, | ||
1324 | TILE_OPC_SADH, TILE_OPC_SADH_U, | ||
1325 | BITFIELD(12, 2) /* index 581 */, | ||
1326 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(586), | ||
1327 | BITFIELD(14, 2) /* index 586 */, | ||
1328 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(591), | ||
1329 | BITFIELD(16, 2) /* index 591 */, | ||
1330 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
1331 | BITFIELD(18, 4) /* index 596 */, | ||
1332 | TILE_OPC_SEQB, TILE_OPC_SEQH, TILE_OPC_SEQ, TILE_OPC_SHLB, TILE_OPC_SHLH, | ||
1333 | TILE_OPC_SHL, TILE_OPC_SHRB, TILE_OPC_SHRH, TILE_OPC_SHR, TILE_OPC_SLTB, | ||
1334 | TILE_OPC_SLTB_U, TILE_OPC_SLTEB, TILE_OPC_SLTEB_U, TILE_OPC_SLTEH, | ||
1335 | TILE_OPC_SLTEH_U, TILE_OPC_SLTE, | ||
1336 | BITFIELD(18, 4) /* index 613 */, | ||
1337 | TILE_OPC_SLTE_U, TILE_OPC_SLTH, TILE_OPC_SLTH_U, TILE_OPC_SLT, | ||
1338 | TILE_OPC_SLT_U, TILE_OPC_SNEB, TILE_OPC_SNEH, TILE_OPC_SNE, TILE_OPC_SRAB, | ||
1339 | TILE_OPC_SRAH, TILE_OPC_SRA, TILE_OPC_SUBB, TILE_OPC_SUBH, TILE_OPC_SUB, | ||
1340 | TILE_OPC_XOR, TILE_OPC_DWORD_ALIGN, | ||
1341 | BITFIELD(18, 3) /* index 630 */, | ||
1342 | CHILD(639), CHILD(642), CHILD(645), CHILD(648), CHILD(651), CHILD(654), | ||
1343 | CHILD(657), CHILD(660), | ||
1344 | BITFIELD(21, 1) /* index 639 */, | ||
1345 | TILE_OPC_ADDS, TILE_OPC_NONE, | ||
1346 | BITFIELD(21, 1) /* index 642 */, | ||
1347 | TILE_OPC_SUBS, TILE_OPC_NONE, | ||
1348 | BITFIELD(21, 1) /* index 645 */, | ||
1349 | TILE_OPC_ADDBS_U, TILE_OPC_NONE, | ||
1350 | BITFIELD(21, 1) /* index 648 */, | ||
1351 | TILE_OPC_ADDHS, TILE_OPC_NONE, | ||
1352 | BITFIELD(21, 1) /* index 651 */, | ||
1353 | TILE_OPC_SUBBS_U, TILE_OPC_NONE, | ||
1354 | BITFIELD(21, 1) /* index 654 */, | ||
1355 | TILE_OPC_SUBHS, TILE_OPC_NONE, | ||
1356 | BITFIELD(21, 1) /* index 657 */, | ||
1357 | TILE_OPC_PACKHS, TILE_OPC_NONE, | ||
1358 | BITFIELD(21, 1) /* index 660 */, | ||
1359 | TILE_OPC_PACKBS_U, TILE_OPC_NONE, | ||
1360 | BITFIELD(18, 4) /* index 663 */, | ||
1361 | TILE_OPC_NONE, TILE_OPC_ADDB_SN, TILE_OPC_ADDH_SN, TILE_OPC_ADD_SN, | ||
1362 | TILE_OPC_ADIFFB_U_SN, TILE_OPC_ADIFFH_SN, TILE_OPC_AND_SN, | ||
1363 | TILE_OPC_AVGB_U_SN, TILE_OPC_AVGH_SN, TILE_OPC_CRC32_32_SN, | ||
1364 | TILE_OPC_CRC32_8_SN, TILE_OPC_INTHB_SN, TILE_OPC_INTHH_SN, | ||
1365 | TILE_OPC_INTLB_SN, TILE_OPC_INTLH_SN, TILE_OPC_MAXB_U_SN, | ||
1366 | BITFIELD(18, 4) /* index 680 */, | ||
1367 | TILE_OPC_MAXH_SN, TILE_OPC_MINB_U_SN, TILE_OPC_MINH_SN, TILE_OPC_MNZB_SN, | ||
1368 | TILE_OPC_MNZH_SN, TILE_OPC_MNZ_SN, TILE_OPC_MULHHA_SS_SN, | ||
1369 | TILE_OPC_MULHHA_SU_SN, TILE_OPC_MULHHA_UU_SN, TILE_OPC_MULHHSA_UU_SN, | ||
1370 | TILE_OPC_MULHH_SS_SN, TILE_OPC_MULHH_SU_SN, TILE_OPC_MULHH_UU_SN, | ||
1371 | TILE_OPC_MULHLA_SS_SN, TILE_OPC_MULHLA_SU_SN, TILE_OPC_MULHLA_US_SN, | ||
1372 | BITFIELD(18, 4) /* index 697 */, | ||
1373 | TILE_OPC_MULHLA_UU_SN, TILE_OPC_MULHLSA_UU_SN, TILE_OPC_MULHL_SS_SN, | ||
1374 | TILE_OPC_MULHL_SU_SN, TILE_OPC_MULHL_US_SN, TILE_OPC_MULHL_UU_SN, | ||
1375 | TILE_OPC_MULLLA_SS_SN, TILE_OPC_MULLLA_SU_SN, TILE_OPC_MULLLA_UU_SN, | ||
1376 | TILE_OPC_MULLLSA_UU_SN, TILE_OPC_MULLL_SS_SN, TILE_OPC_MULLL_SU_SN, | ||
1377 | TILE_OPC_MULLL_UU_SN, TILE_OPC_MVNZ_SN, TILE_OPC_MVZ_SN, TILE_OPC_MZB_SN, | ||
1378 | BITFIELD(18, 4) /* index 714 */, | ||
1379 | TILE_OPC_MZH_SN, TILE_OPC_MZ_SN, TILE_OPC_NOR_SN, CHILD(731), | ||
1380 | TILE_OPC_PACKHB_SN, TILE_OPC_PACKLB_SN, TILE_OPC_RL_SN, TILE_OPC_S1A_SN, | ||
1381 | TILE_OPC_S2A_SN, TILE_OPC_S3A_SN, TILE_OPC_SADAB_U_SN, TILE_OPC_SADAH_SN, | ||
1382 | TILE_OPC_SADAH_U_SN, TILE_OPC_SADB_U_SN, TILE_OPC_SADH_SN, | ||
1383 | TILE_OPC_SADH_U_SN, | ||
1384 | BITFIELD(12, 2) /* index 731 */, | ||
1385 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(736), | ||
1386 | BITFIELD(14, 2) /* index 736 */, | ||
1387 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(741), | ||
1388 | BITFIELD(16, 2) /* index 741 */, | ||
1389 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_MOVE_SN, | ||
1390 | BITFIELD(18, 4) /* index 746 */, | ||
1391 | TILE_OPC_SEQB_SN, TILE_OPC_SEQH_SN, TILE_OPC_SEQ_SN, TILE_OPC_SHLB_SN, | ||
1392 | TILE_OPC_SHLH_SN, TILE_OPC_SHL_SN, TILE_OPC_SHRB_SN, TILE_OPC_SHRH_SN, | ||
1393 | TILE_OPC_SHR_SN, TILE_OPC_SLTB_SN, TILE_OPC_SLTB_U_SN, TILE_OPC_SLTEB_SN, | ||
1394 | TILE_OPC_SLTEB_U_SN, TILE_OPC_SLTEH_SN, TILE_OPC_SLTEH_U_SN, | ||
1395 | TILE_OPC_SLTE_SN, | ||
1396 | BITFIELD(18, 4) /* index 763 */, | ||
1397 | TILE_OPC_SLTE_U_SN, TILE_OPC_SLTH_SN, TILE_OPC_SLTH_U_SN, TILE_OPC_SLT_SN, | ||
1398 | TILE_OPC_SLT_U_SN, TILE_OPC_SNEB_SN, TILE_OPC_SNEH_SN, TILE_OPC_SNE_SN, | ||
1399 | TILE_OPC_SRAB_SN, TILE_OPC_SRAH_SN, TILE_OPC_SRA_SN, TILE_OPC_SUBB_SN, | ||
1400 | TILE_OPC_SUBH_SN, TILE_OPC_SUB_SN, TILE_OPC_XOR_SN, TILE_OPC_DWORD_ALIGN_SN, | ||
1401 | BITFIELD(18, 3) /* index 780 */, | ||
1402 | CHILD(789), CHILD(792), CHILD(795), CHILD(798), CHILD(801), CHILD(804), | ||
1403 | CHILD(807), CHILD(810), | ||
1404 | BITFIELD(21, 1) /* index 789 */, | ||
1405 | TILE_OPC_ADDS_SN, TILE_OPC_NONE, | ||
1406 | BITFIELD(21, 1) /* index 792 */, | ||
1407 | TILE_OPC_SUBS_SN, TILE_OPC_NONE, | ||
1408 | BITFIELD(21, 1) /* index 795 */, | ||
1409 | TILE_OPC_ADDBS_U_SN, TILE_OPC_NONE, | ||
1410 | BITFIELD(21, 1) /* index 798 */, | ||
1411 | TILE_OPC_ADDHS_SN, TILE_OPC_NONE, | ||
1412 | BITFIELD(21, 1) /* index 801 */, | ||
1413 | TILE_OPC_SUBBS_U_SN, TILE_OPC_NONE, | ||
1414 | BITFIELD(21, 1) /* index 804 */, | ||
1415 | TILE_OPC_SUBHS_SN, TILE_OPC_NONE, | ||
1416 | BITFIELD(21, 1) /* index 807 */, | ||
1417 | TILE_OPC_PACKHS_SN, TILE_OPC_NONE, | ||
1418 | BITFIELD(21, 1) /* index 810 */, | ||
1419 | TILE_OPC_PACKBS_U_SN, TILE_OPC_NONE, | ||
1420 | BITFIELD(6, 2) /* index 813 */, | ||
1421 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(818), | ||
1422 | BITFIELD(8, 2) /* index 818 */, | ||
1423 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(823), | ||
1424 | BITFIELD(10, 2) /* index 823 */, | ||
1425 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_MOVELI_SN, | ||
1426 | BITFIELD(6, 2) /* index 828 */, | ||
1427 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(833), | ||
1428 | BITFIELD(8, 2) /* index 833 */, | ||
1429 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(838), | ||
1430 | BITFIELD(10, 2) /* index 838 */, | ||
1431 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_MOVELI, | ||
1432 | BITFIELD(0, 2) /* index 843 */, | ||
1433 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(848), | ||
1434 | BITFIELD(2, 2) /* index 848 */, | ||
1435 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(853), | ||
1436 | BITFIELD(4, 2) /* index 853 */, | ||
1437 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(858), | ||
1438 | BITFIELD(6, 2) /* index 858 */, | ||
1439 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(863), | ||
1440 | BITFIELD(8, 2) /* index 863 */, | ||
1441 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(868), | ||
1442 | BITFIELD(10, 2) /* index 868 */, | ||
1443 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_INFOL, | ||
1444 | BITFIELD(20, 2) /* index 873 */, | ||
1445 | TILE_OPC_NONE, TILE_OPC_ADDIB, TILE_OPC_ADDIH, TILE_OPC_ADDI, | ||
1446 | BITFIELD(20, 2) /* index 878 */, | ||
1447 | TILE_OPC_MAXIB_U, TILE_OPC_MAXIH, TILE_OPC_MINIB_U, TILE_OPC_MINIH, | ||
1448 | BITFIELD(20, 2) /* index 883 */, | ||
1449 | CHILD(888), TILE_OPC_SEQIB, TILE_OPC_SEQIH, TILE_OPC_SEQI, | ||
1450 | BITFIELD(6, 2) /* index 888 */, | ||
1451 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(893), | ||
1452 | BITFIELD(8, 2) /* index 893 */, | ||
1453 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(898), | ||
1454 | BITFIELD(10, 2) /* index 898 */, | ||
1455 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
1456 | BITFIELD(20, 2) /* index 903 */, | ||
1457 | TILE_OPC_SLTIB, TILE_OPC_SLTIB_U, TILE_OPC_SLTIH, TILE_OPC_SLTIH_U, | ||
1458 | BITFIELD(20, 2) /* index 908 */, | ||
1459 | TILE_OPC_SLTI, TILE_OPC_SLTI_U, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1460 | BITFIELD(20, 2) /* index 913 */, | ||
1461 | TILE_OPC_NONE, TILE_OPC_ADDIB_SN, TILE_OPC_ADDIH_SN, TILE_OPC_ADDI_SN, | ||
1462 | BITFIELD(20, 2) /* index 918 */, | ||
1463 | TILE_OPC_MAXIB_U_SN, TILE_OPC_MAXIH_SN, TILE_OPC_MINIB_U_SN, | ||
1464 | TILE_OPC_MINIH_SN, | ||
1465 | BITFIELD(20, 2) /* index 923 */, | ||
1466 | CHILD(928), TILE_OPC_SEQIB_SN, TILE_OPC_SEQIH_SN, TILE_OPC_SEQI_SN, | ||
1467 | BITFIELD(6, 2) /* index 928 */, | ||
1468 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(933), | ||
1469 | BITFIELD(8, 2) /* index 933 */, | ||
1470 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(938), | ||
1471 | BITFIELD(10, 2) /* index 938 */, | ||
1472 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_MOVEI_SN, | ||
1473 | BITFIELD(20, 2) /* index 943 */, | ||
1474 | TILE_OPC_SLTIB_SN, TILE_OPC_SLTIB_U_SN, TILE_OPC_SLTIH_SN, | ||
1475 | TILE_OPC_SLTIH_U_SN, | ||
1476 | BITFIELD(20, 2) /* index 948 */, | ||
1477 | TILE_OPC_SLTI_SN, TILE_OPC_SLTI_U_SN, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1478 | BITFIELD(20, 2) /* index 953 */, | ||
1479 | TILE_OPC_NONE, CHILD(958), TILE_OPC_XORI, TILE_OPC_NONE, | ||
1480 | BITFIELD(0, 2) /* index 958 */, | ||
1481 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(963), | ||
1482 | BITFIELD(2, 2) /* index 963 */, | ||
1483 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(968), | ||
1484 | BITFIELD(4, 2) /* index 968 */, | ||
1485 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(973), | ||
1486 | BITFIELD(6, 2) /* index 973 */, | ||
1487 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(978), | ||
1488 | BITFIELD(8, 2) /* index 978 */, | ||
1489 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(983), | ||
1490 | BITFIELD(10, 2) /* index 983 */, | ||
1491 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
1492 | BITFIELD(20, 2) /* index 988 */, | ||
1493 | TILE_OPC_NONE, TILE_OPC_ANDI_SN, TILE_OPC_XORI_SN, TILE_OPC_NONE, | ||
1494 | BITFIELD(17, 5) /* index 993 */, | ||
1495 | TILE_OPC_NONE, TILE_OPC_RLI, TILE_OPC_SHLIB, TILE_OPC_SHLIH, TILE_OPC_SHLI, | ||
1496 | TILE_OPC_SHRIB, TILE_OPC_SHRIH, TILE_OPC_SHRI, TILE_OPC_SRAIB, | ||
1497 | TILE_OPC_SRAIH, TILE_OPC_SRAI, CHILD(1026), TILE_OPC_NONE, TILE_OPC_NONE, | ||
1498 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1499 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1500 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1501 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1502 | BITFIELD(12, 4) /* index 1026 */, | ||
1503 | TILE_OPC_NONE, CHILD(1043), CHILD(1046), CHILD(1049), CHILD(1052), | ||
1504 | CHILD(1055), CHILD(1058), CHILD(1061), CHILD(1064), CHILD(1067), | ||
1505 | CHILD(1070), CHILD(1073), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1506 | TILE_OPC_NONE, | ||
1507 | BITFIELD(16, 1) /* index 1043 */, | ||
1508 | TILE_OPC_BITX, TILE_OPC_NONE, | ||
1509 | BITFIELD(16, 1) /* index 1046 */, | ||
1510 | TILE_OPC_BYTEX, TILE_OPC_NONE, | ||
1511 | BITFIELD(16, 1) /* index 1049 */, | ||
1512 | TILE_OPC_CLZ, TILE_OPC_NONE, | ||
1513 | BITFIELD(16, 1) /* index 1052 */, | ||
1514 | TILE_OPC_CTZ, TILE_OPC_NONE, | ||
1515 | BITFIELD(16, 1) /* index 1055 */, | ||
1516 | TILE_OPC_FNOP, TILE_OPC_NONE, | ||
1517 | BITFIELD(16, 1) /* index 1058 */, | ||
1518 | TILE_OPC_NOP, TILE_OPC_NONE, | ||
1519 | BITFIELD(16, 1) /* index 1061 */, | ||
1520 | TILE_OPC_PCNT, TILE_OPC_NONE, | ||
1521 | BITFIELD(16, 1) /* index 1064 */, | ||
1522 | TILE_OPC_TBLIDXB0, TILE_OPC_NONE, | ||
1523 | BITFIELD(16, 1) /* index 1067 */, | ||
1524 | TILE_OPC_TBLIDXB1, TILE_OPC_NONE, | ||
1525 | BITFIELD(16, 1) /* index 1070 */, | ||
1526 | TILE_OPC_TBLIDXB2, TILE_OPC_NONE, | ||
1527 | BITFIELD(16, 1) /* index 1073 */, | ||
1528 | TILE_OPC_TBLIDXB3, TILE_OPC_NONE, | ||
1529 | BITFIELD(17, 5) /* index 1076 */, | ||
1530 | TILE_OPC_NONE, TILE_OPC_RLI_SN, TILE_OPC_SHLIB_SN, TILE_OPC_SHLIH_SN, | ||
1531 | TILE_OPC_SHLI_SN, TILE_OPC_SHRIB_SN, TILE_OPC_SHRIH_SN, TILE_OPC_SHRI_SN, | ||
1532 | TILE_OPC_SRAIB_SN, TILE_OPC_SRAIH_SN, TILE_OPC_SRAI_SN, CHILD(1109), | ||
1533 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1534 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1535 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1536 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1537 | BITFIELD(12, 4) /* index 1109 */, | ||
1538 | TILE_OPC_NONE, CHILD(1126), CHILD(1129), CHILD(1132), CHILD(1135), | ||
1539 | CHILD(1055), CHILD(1058), CHILD(1138), CHILD(1141), CHILD(1144), | ||
1540 | CHILD(1147), CHILD(1150), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1541 | TILE_OPC_NONE, | ||
1542 | BITFIELD(16, 1) /* index 1126 */, | ||
1543 | TILE_OPC_BITX_SN, TILE_OPC_NONE, | ||
1544 | BITFIELD(16, 1) /* index 1129 */, | ||
1545 | TILE_OPC_BYTEX_SN, TILE_OPC_NONE, | ||
1546 | BITFIELD(16, 1) /* index 1132 */, | ||
1547 | TILE_OPC_CLZ_SN, TILE_OPC_NONE, | ||
1548 | BITFIELD(16, 1) /* index 1135 */, | ||
1549 | TILE_OPC_CTZ_SN, TILE_OPC_NONE, | ||
1550 | BITFIELD(16, 1) /* index 1138 */, | ||
1551 | TILE_OPC_PCNT_SN, TILE_OPC_NONE, | ||
1552 | BITFIELD(16, 1) /* index 1141 */, | ||
1553 | TILE_OPC_TBLIDXB0_SN, TILE_OPC_NONE, | ||
1554 | BITFIELD(16, 1) /* index 1144 */, | ||
1555 | TILE_OPC_TBLIDXB1_SN, TILE_OPC_NONE, | ||
1556 | BITFIELD(16, 1) /* index 1147 */, | ||
1557 | TILE_OPC_TBLIDXB2_SN, TILE_OPC_NONE, | ||
1558 | BITFIELD(16, 1) /* index 1150 */, | ||
1559 | TILE_OPC_TBLIDXB3_SN, TILE_OPC_NONE, | ||
1560 | }; | ||
1561 | |||
1562 | static const unsigned short decode_X1_fsm[1540] = | ||
1563 | { | ||
1564 | BITFIELD(54, 9) /* index 0 */, | ||
1565 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1566 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1567 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1568 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1569 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1570 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1571 | TILE_OPC_NONE, TILE_OPC_NONE, CHILD(513), CHILD(561), CHILD(594), | ||
1572 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1573 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1574 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(641), CHILD(689), | ||
1575 | CHILD(722), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1576 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1577 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(766), | ||
1578 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
1579 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
1580 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
1581 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
1582 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
1583 | CHILD(766), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
1584 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
1585 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
1586 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
1587 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
1588 | CHILD(781), CHILD(781), CHILD(781), CHILD(796), CHILD(796), CHILD(796), | ||
1589 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
1590 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
1591 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
1592 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
1593 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(826), | ||
1594 | CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), | ||
1595 | CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), | ||
1596 | CHILD(826), CHILD(826), CHILD(826), CHILD(843), CHILD(843), CHILD(843), | ||
1597 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1598 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
1599 | CHILD(843), CHILD(860), CHILD(899), CHILD(923), CHILD(932), TILE_OPC_NONE, | ||
1600 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1601 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1602 | TILE_OPC_NONE, CHILD(941), CHILD(950), CHILD(974), CHILD(983), | ||
1603 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1604 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1605 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1606 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1607 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1608 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1609 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1610 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
1611 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, CHILD(992), | ||
1612 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1613 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1614 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1615 | CHILD(1334), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1616 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1617 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1618 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1619 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1620 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1621 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1622 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1623 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1624 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_J, TILE_OPC_J, | ||
1625 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1626 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1627 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1628 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1629 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1630 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1631 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1632 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1633 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1634 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
1635 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1636 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1637 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1638 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1639 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1640 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1641 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1642 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1643 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1644 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1645 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1646 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1647 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
1648 | TILE_OPC_JAL, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1649 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1650 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1651 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1652 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1653 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1654 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1655 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1656 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1657 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1658 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1659 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1660 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1661 | BITFIELD(49, 5) /* index 513 */, | ||
1662 | TILE_OPC_NONE, TILE_OPC_ADDB, TILE_OPC_ADDH, TILE_OPC_ADD, TILE_OPC_AND, | ||
1663 | TILE_OPC_INTHB, TILE_OPC_INTHH, TILE_OPC_INTLB, TILE_OPC_INTLH, | ||
1664 | TILE_OPC_JALRP, TILE_OPC_JALR, TILE_OPC_JRP, TILE_OPC_JR, TILE_OPC_LNK, | ||
1665 | TILE_OPC_MAXB_U, TILE_OPC_MAXH, TILE_OPC_MINB_U, TILE_OPC_MINH, | ||
1666 | TILE_OPC_MNZB, TILE_OPC_MNZH, TILE_OPC_MNZ, TILE_OPC_MZB, TILE_OPC_MZH, | ||
1667 | TILE_OPC_MZ, TILE_OPC_NOR, CHILD(546), TILE_OPC_PACKHB, TILE_OPC_PACKLB, | ||
1668 | TILE_OPC_RL, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_S3A, | ||
1669 | BITFIELD(43, 2) /* index 546 */, | ||
1670 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(551), | ||
1671 | BITFIELD(45, 2) /* index 551 */, | ||
1672 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(556), | ||
1673 | BITFIELD(47, 2) /* index 556 */, | ||
1674 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
1675 | BITFIELD(49, 5) /* index 561 */, | ||
1676 | TILE_OPC_SB, TILE_OPC_SEQB, TILE_OPC_SEQH, TILE_OPC_SEQ, TILE_OPC_SHLB, | ||
1677 | TILE_OPC_SHLH, TILE_OPC_SHL, TILE_OPC_SHRB, TILE_OPC_SHRH, TILE_OPC_SHR, | ||
1678 | TILE_OPC_SH, TILE_OPC_SLTB, TILE_OPC_SLTB_U, TILE_OPC_SLTEB, | ||
1679 | TILE_OPC_SLTEB_U, TILE_OPC_SLTEH, TILE_OPC_SLTEH_U, TILE_OPC_SLTE, | ||
1680 | TILE_OPC_SLTE_U, TILE_OPC_SLTH, TILE_OPC_SLTH_U, TILE_OPC_SLT, | ||
1681 | TILE_OPC_SLT_U, TILE_OPC_SNEB, TILE_OPC_SNEH, TILE_OPC_SNE, TILE_OPC_SRAB, | ||
1682 | TILE_OPC_SRAH, TILE_OPC_SRA, TILE_OPC_SUBB, TILE_OPC_SUBH, TILE_OPC_SUB, | ||
1683 | BITFIELD(49, 4) /* index 594 */, | ||
1684 | CHILD(611), CHILD(614), CHILD(617), CHILD(620), CHILD(623), CHILD(626), | ||
1685 | CHILD(629), CHILD(632), CHILD(635), CHILD(638), TILE_OPC_NONE, | ||
1686 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1687 | BITFIELD(53, 1) /* index 611 */, | ||
1688 | TILE_OPC_SW, TILE_OPC_NONE, | ||
1689 | BITFIELD(53, 1) /* index 614 */, | ||
1690 | TILE_OPC_XOR, TILE_OPC_NONE, | ||
1691 | BITFIELD(53, 1) /* index 617 */, | ||
1692 | TILE_OPC_ADDS, TILE_OPC_NONE, | ||
1693 | BITFIELD(53, 1) /* index 620 */, | ||
1694 | TILE_OPC_SUBS, TILE_OPC_NONE, | ||
1695 | BITFIELD(53, 1) /* index 623 */, | ||
1696 | TILE_OPC_ADDBS_U, TILE_OPC_NONE, | ||
1697 | BITFIELD(53, 1) /* index 626 */, | ||
1698 | TILE_OPC_ADDHS, TILE_OPC_NONE, | ||
1699 | BITFIELD(53, 1) /* index 629 */, | ||
1700 | TILE_OPC_SUBBS_U, TILE_OPC_NONE, | ||
1701 | BITFIELD(53, 1) /* index 632 */, | ||
1702 | TILE_OPC_SUBHS, TILE_OPC_NONE, | ||
1703 | BITFIELD(53, 1) /* index 635 */, | ||
1704 | TILE_OPC_PACKHS, TILE_OPC_NONE, | ||
1705 | BITFIELD(53, 1) /* index 638 */, | ||
1706 | TILE_OPC_PACKBS_U, TILE_OPC_NONE, | ||
1707 | BITFIELD(49, 5) /* index 641 */, | ||
1708 | TILE_OPC_NONE, TILE_OPC_ADDB_SN, TILE_OPC_ADDH_SN, TILE_OPC_ADD_SN, | ||
1709 | TILE_OPC_AND_SN, TILE_OPC_INTHB_SN, TILE_OPC_INTHH_SN, TILE_OPC_INTLB_SN, | ||
1710 | TILE_OPC_INTLH_SN, TILE_OPC_JALRP, TILE_OPC_JALR, TILE_OPC_JRP, TILE_OPC_JR, | ||
1711 | TILE_OPC_LNK_SN, TILE_OPC_MAXB_U_SN, TILE_OPC_MAXH_SN, TILE_OPC_MINB_U_SN, | ||
1712 | TILE_OPC_MINH_SN, TILE_OPC_MNZB_SN, TILE_OPC_MNZH_SN, TILE_OPC_MNZ_SN, | ||
1713 | TILE_OPC_MZB_SN, TILE_OPC_MZH_SN, TILE_OPC_MZ_SN, TILE_OPC_NOR_SN, | ||
1714 | CHILD(674), TILE_OPC_PACKHB_SN, TILE_OPC_PACKLB_SN, TILE_OPC_RL_SN, | ||
1715 | TILE_OPC_S1A_SN, TILE_OPC_S2A_SN, TILE_OPC_S3A_SN, | ||
1716 | BITFIELD(43, 2) /* index 674 */, | ||
1717 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(679), | ||
1718 | BITFIELD(45, 2) /* index 679 */, | ||
1719 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(684), | ||
1720 | BITFIELD(47, 2) /* index 684 */, | ||
1721 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_MOVE_SN, | ||
1722 | BITFIELD(49, 5) /* index 689 */, | ||
1723 | TILE_OPC_SB, TILE_OPC_SEQB_SN, TILE_OPC_SEQH_SN, TILE_OPC_SEQ_SN, | ||
1724 | TILE_OPC_SHLB_SN, TILE_OPC_SHLH_SN, TILE_OPC_SHL_SN, TILE_OPC_SHRB_SN, | ||
1725 | TILE_OPC_SHRH_SN, TILE_OPC_SHR_SN, TILE_OPC_SH, TILE_OPC_SLTB_SN, | ||
1726 | TILE_OPC_SLTB_U_SN, TILE_OPC_SLTEB_SN, TILE_OPC_SLTEB_U_SN, | ||
1727 | TILE_OPC_SLTEH_SN, TILE_OPC_SLTEH_U_SN, TILE_OPC_SLTE_SN, | ||
1728 | TILE_OPC_SLTE_U_SN, TILE_OPC_SLTH_SN, TILE_OPC_SLTH_U_SN, TILE_OPC_SLT_SN, | ||
1729 | TILE_OPC_SLT_U_SN, TILE_OPC_SNEB_SN, TILE_OPC_SNEH_SN, TILE_OPC_SNE_SN, | ||
1730 | TILE_OPC_SRAB_SN, TILE_OPC_SRAH_SN, TILE_OPC_SRA_SN, TILE_OPC_SUBB_SN, | ||
1731 | TILE_OPC_SUBH_SN, TILE_OPC_SUB_SN, | ||
1732 | BITFIELD(49, 4) /* index 722 */, | ||
1733 | CHILD(611), CHILD(739), CHILD(742), CHILD(745), CHILD(748), CHILD(751), | ||
1734 | CHILD(754), CHILD(757), CHILD(760), CHILD(763), TILE_OPC_NONE, | ||
1735 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1736 | BITFIELD(53, 1) /* index 739 */, | ||
1737 | TILE_OPC_XOR_SN, TILE_OPC_NONE, | ||
1738 | BITFIELD(53, 1) /* index 742 */, | ||
1739 | TILE_OPC_ADDS_SN, TILE_OPC_NONE, | ||
1740 | BITFIELD(53, 1) /* index 745 */, | ||
1741 | TILE_OPC_SUBS_SN, TILE_OPC_NONE, | ||
1742 | BITFIELD(53, 1) /* index 748 */, | ||
1743 | TILE_OPC_ADDBS_U_SN, TILE_OPC_NONE, | ||
1744 | BITFIELD(53, 1) /* index 751 */, | ||
1745 | TILE_OPC_ADDHS_SN, TILE_OPC_NONE, | ||
1746 | BITFIELD(53, 1) /* index 754 */, | ||
1747 | TILE_OPC_SUBBS_U_SN, TILE_OPC_NONE, | ||
1748 | BITFIELD(53, 1) /* index 757 */, | ||
1749 | TILE_OPC_SUBHS_SN, TILE_OPC_NONE, | ||
1750 | BITFIELD(53, 1) /* index 760 */, | ||
1751 | TILE_OPC_PACKHS_SN, TILE_OPC_NONE, | ||
1752 | BITFIELD(53, 1) /* index 763 */, | ||
1753 | TILE_OPC_PACKBS_U_SN, TILE_OPC_NONE, | ||
1754 | BITFIELD(37, 2) /* index 766 */, | ||
1755 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(771), | ||
1756 | BITFIELD(39, 2) /* index 771 */, | ||
1757 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(776), | ||
1758 | BITFIELD(41, 2) /* index 776 */, | ||
1759 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_MOVELI_SN, | ||
1760 | BITFIELD(37, 2) /* index 781 */, | ||
1761 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(786), | ||
1762 | BITFIELD(39, 2) /* index 786 */, | ||
1763 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(791), | ||
1764 | BITFIELD(41, 2) /* index 791 */, | ||
1765 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_MOVELI, | ||
1766 | BITFIELD(31, 2) /* index 796 */, | ||
1767 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(801), | ||
1768 | BITFIELD(33, 2) /* index 801 */, | ||
1769 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(806), | ||
1770 | BITFIELD(35, 2) /* index 806 */, | ||
1771 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(811), | ||
1772 | BITFIELD(37, 2) /* index 811 */, | ||
1773 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(816), | ||
1774 | BITFIELD(39, 2) /* index 816 */, | ||
1775 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(821), | ||
1776 | BITFIELD(41, 2) /* index 821 */, | ||
1777 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_INFOL, | ||
1778 | BITFIELD(31, 4) /* index 826 */, | ||
1779 | TILE_OPC_BZ, TILE_OPC_BZT, TILE_OPC_BNZ, TILE_OPC_BNZT, TILE_OPC_BGZ, | ||
1780 | TILE_OPC_BGZT, TILE_OPC_BGEZ, TILE_OPC_BGEZT, TILE_OPC_BLZ, TILE_OPC_BLZT, | ||
1781 | TILE_OPC_BLEZ, TILE_OPC_BLEZT, TILE_OPC_BBS, TILE_OPC_BBST, TILE_OPC_BBNS, | ||
1782 | TILE_OPC_BBNST, | ||
1783 | BITFIELD(31, 4) /* index 843 */, | ||
1784 | TILE_OPC_BZ_SN, TILE_OPC_BZT_SN, TILE_OPC_BNZ_SN, TILE_OPC_BNZT_SN, | ||
1785 | TILE_OPC_BGZ_SN, TILE_OPC_BGZT_SN, TILE_OPC_BGEZ_SN, TILE_OPC_BGEZT_SN, | ||
1786 | TILE_OPC_BLZ_SN, TILE_OPC_BLZT_SN, TILE_OPC_BLEZ_SN, TILE_OPC_BLEZT_SN, | ||
1787 | TILE_OPC_BBS_SN, TILE_OPC_BBST_SN, TILE_OPC_BBNS_SN, TILE_OPC_BBNST_SN, | ||
1788 | BITFIELD(51, 3) /* index 860 */, | ||
1789 | TILE_OPC_NONE, TILE_OPC_ADDIB, TILE_OPC_ADDIH, TILE_OPC_ADDI, CHILD(869), | ||
1790 | TILE_OPC_MAXIB_U, TILE_OPC_MAXIH, TILE_OPC_MFSPR, | ||
1791 | BITFIELD(31, 2) /* index 869 */, | ||
1792 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(874), | ||
1793 | BITFIELD(33, 2) /* index 874 */, | ||
1794 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(879), | ||
1795 | BITFIELD(35, 2) /* index 879 */, | ||
1796 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(884), | ||
1797 | BITFIELD(37, 2) /* index 884 */, | ||
1798 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(889), | ||
1799 | BITFIELD(39, 2) /* index 889 */, | ||
1800 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(894), | ||
1801 | BITFIELD(41, 2) /* index 894 */, | ||
1802 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
1803 | BITFIELD(51, 3) /* index 899 */, | ||
1804 | TILE_OPC_MINIB_U, TILE_OPC_MINIH, TILE_OPC_MTSPR, CHILD(908), | ||
1805 | TILE_OPC_SEQIB, TILE_OPC_SEQIH, TILE_OPC_SEQI, TILE_OPC_SLTIB, | ||
1806 | BITFIELD(37, 2) /* index 908 */, | ||
1807 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(913), | ||
1808 | BITFIELD(39, 2) /* index 913 */, | ||
1809 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(918), | ||
1810 | BITFIELD(41, 2) /* index 918 */, | ||
1811 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
1812 | BITFIELD(51, 3) /* index 923 */, | ||
1813 | TILE_OPC_SLTIB_U, TILE_OPC_SLTIH, TILE_OPC_SLTIH_U, TILE_OPC_SLTI, | ||
1814 | TILE_OPC_SLTI_U, TILE_OPC_XORI, TILE_OPC_LBADD, TILE_OPC_LBADD_U, | ||
1815 | BITFIELD(51, 3) /* index 932 */, | ||
1816 | TILE_OPC_LHADD, TILE_OPC_LHADD_U, TILE_OPC_LWADD, TILE_OPC_LWADD_NA, | ||
1817 | TILE_OPC_SBADD, TILE_OPC_SHADD, TILE_OPC_SWADD, TILE_OPC_NONE, | ||
1818 | BITFIELD(51, 3) /* index 941 */, | ||
1819 | TILE_OPC_NONE, TILE_OPC_ADDIB_SN, TILE_OPC_ADDIH_SN, TILE_OPC_ADDI_SN, | ||
1820 | TILE_OPC_ANDI_SN, TILE_OPC_MAXIB_U_SN, TILE_OPC_MAXIH_SN, TILE_OPC_MFSPR, | ||
1821 | BITFIELD(51, 3) /* index 950 */, | ||
1822 | TILE_OPC_MINIB_U_SN, TILE_OPC_MINIH_SN, TILE_OPC_MTSPR, CHILD(959), | ||
1823 | TILE_OPC_SEQIB_SN, TILE_OPC_SEQIH_SN, TILE_OPC_SEQI_SN, TILE_OPC_SLTIB_SN, | ||
1824 | BITFIELD(37, 2) /* index 959 */, | ||
1825 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(964), | ||
1826 | BITFIELD(39, 2) /* index 964 */, | ||
1827 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(969), | ||
1828 | BITFIELD(41, 2) /* index 969 */, | ||
1829 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_MOVEI_SN, | ||
1830 | BITFIELD(51, 3) /* index 974 */, | ||
1831 | TILE_OPC_SLTIB_U_SN, TILE_OPC_SLTIH_SN, TILE_OPC_SLTIH_U_SN, | ||
1832 | TILE_OPC_SLTI_SN, TILE_OPC_SLTI_U_SN, TILE_OPC_XORI_SN, TILE_OPC_LBADD_SN, | ||
1833 | TILE_OPC_LBADD_U_SN, | ||
1834 | BITFIELD(51, 3) /* index 983 */, | ||
1835 | TILE_OPC_LHADD_SN, TILE_OPC_LHADD_U_SN, TILE_OPC_LWADD_SN, | ||
1836 | TILE_OPC_LWADD_NA_SN, TILE_OPC_SBADD, TILE_OPC_SHADD, TILE_OPC_SWADD, | ||
1837 | TILE_OPC_NONE, | ||
1838 | BITFIELD(46, 7) /* index 992 */, | ||
1839 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(1121), | ||
1840 | CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1124), CHILD(1124), | ||
1841 | CHILD(1124), CHILD(1124), CHILD(1127), CHILD(1127), CHILD(1127), | ||
1842 | CHILD(1127), CHILD(1130), CHILD(1130), CHILD(1130), CHILD(1130), | ||
1843 | CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1136), | ||
1844 | CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1139), CHILD(1139), | ||
1845 | CHILD(1139), CHILD(1139), CHILD(1142), CHILD(1142), CHILD(1142), | ||
1846 | CHILD(1142), CHILD(1145), CHILD(1145), CHILD(1145), CHILD(1145), | ||
1847 | CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1151), | ||
1848 | CHILD(1242), CHILD(1290), CHILD(1323), TILE_OPC_NONE, TILE_OPC_NONE, | ||
1849 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1850 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1851 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1852 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1853 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1854 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1855 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1856 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1857 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1858 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1859 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1860 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1861 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1862 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1863 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1864 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1865 | BITFIELD(53, 1) /* index 1121 */, | ||
1866 | TILE_OPC_RLI, TILE_OPC_NONE, | ||
1867 | BITFIELD(53, 1) /* index 1124 */, | ||
1868 | TILE_OPC_SHLIB, TILE_OPC_NONE, | ||
1869 | BITFIELD(53, 1) /* index 1127 */, | ||
1870 | TILE_OPC_SHLIH, TILE_OPC_NONE, | ||
1871 | BITFIELD(53, 1) /* index 1130 */, | ||
1872 | TILE_OPC_SHLI, TILE_OPC_NONE, | ||
1873 | BITFIELD(53, 1) /* index 1133 */, | ||
1874 | TILE_OPC_SHRIB, TILE_OPC_NONE, | ||
1875 | BITFIELD(53, 1) /* index 1136 */, | ||
1876 | TILE_OPC_SHRIH, TILE_OPC_NONE, | ||
1877 | BITFIELD(53, 1) /* index 1139 */, | ||
1878 | TILE_OPC_SHRI, TILE_OPC_NONE, | ||
1879 | BITFIELD(53, 1) /* index 1142 */, | ||
1880 | TILE_OPC_SRAIB, TILE_OPC_NONE, | ||
1881 | BITFIELD(53, 1) /* index 1145 */, | ||
1882 | TILE_OPC_SRAIH, TILE_OPC_NONE, | ||
1883 | BITFIELD(53, 1) /* index 1148 */, | ||
1884 | TILE_OPC_SRAI, TILE_OPC_NONE, | ||
1885 | BITFIELD(43, 3) /* index 1151 */, | ||
1886 | TILE_OPC_NONE, CHILD(1160), CHILD(1163), CHILD(1166), CHILD(1169), | ||
1887 | CHILD(1172), CHILD(1175), CHILD(1178), | ||
1888 | BITFIELD(53, 1) /* index 1160 */, | ||
1889 | TILE_OPC_DRAIN, TILE_OPC_NONE, | ||
1890 | BITFIELD(53, 1) /* index 1163 */, | ||
1891 | TILE_OPC_DTLBPR, TILE_OPC_NONE, | ||
1892 | BITFIELD(53, 1) /* index 1166 */, | ||
1893 | TILE_OPC_FINV, TILE_OPC_NONE, | ||
1894 | BITFIELD(53, 1) /* index 1169 */, | ||
1895 | TILE_OPC_FLUSH, TILE_OPC_NONE, | ||
1896 | BITFIELD(53, 1) /* index 1172 */, | ||
1897 | TILE_OPC_FNOP, TILE_OPC_NONE, | ||
1898 | BITFIELD(53, 1) /* index 1175 */, | ||
1899 | TILE_OPC_ICOH, TILE_OPC_NONE, | ||
1900 | BITFIELD(31, 2) /* index 1178 */, | ||
1901 | CHILD(1183), CHILD(1211), CHILD(1239), CHILD(1239), | ||
1902 | BITFIELD(53, 1) /* index 1183 */, | ||
1903 | CHILD(1186), TILE_OPC_NONE, | ||
1904 | BITFIELD(33, 2) /* index 1186 */, | ||
1905 | TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_ILL, CHILD(1191), | ||
1906 | BITFIELD(35, 2) /* index 1191 */, | ||
1907 | TILE_OPC_ILL, CHILD(1196), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1908 | BITFIELD(37, 2) /* index 1196 */, | ||
1909 | TILE_OPC_ILL, CHILD(1201), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1910 | BITFIELD(39, 2) /* index 1201 */, | ||
1911 | TILE_OPC_ILL, CHILD(1206), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1912 | BITFIELD(41, 2) /* index 1206 */, | ||
1913 | TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_BPT, TILE_OPC_ILL, | ||
1914 | BITFIELD(53, 1) /* index 1211 */, | ||
1915 | CHILD(1214), TILE_OPC_NONE, | ||
1916 | BITFIELD(33, 2) /* index 1214 */, | ||
1917 | TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_ILL, CHILD(1219), | ||
1918 | BITFIELD(35, 2) /* index 1219 */, | ||
1919 | TILE_OPC_ILL, CHILD(1224), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1920 | BITFIELD(37, 2) /* index 1224 */, | ||
1921 | TILE_OPC_ILL, CHILD(1229), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1922 | BITFIELD(39, 2) /* index 1229 */, | ||
1923 | TILE_OPC_ILL, CHILD(1234), TILE_OPC_ILL, TILE_OPC_ILL, | ||
1924 | BITFIELD(41, 2) /* index 1234 */, | ||
1925 | TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_RAISE, TILE_OPC_ILL, | ||
1926 | BITFIELD(53, 1) /* index 1239 */, | ||
1927 | TILE_OPC_ILL, TILE_OPC_NONE, | ||
1928 | BITFIELD(43, 3) /* index 1242 */, | ||
1929 | CHILD(1251), CHILD(1254), CHILD(1257), CHILD(1275), CHILD(1278), | ||
1930 | CHILD(1281), CHILD(1284), CHILD(1287), | ||
1931 | BITFIELD(53, 1) /* index 1251 */, | ||
1932 | TILE_OPC_INV, TILE_OPC_NONE, | ||
1933 | BITFIELD(53, 1) /* index 1254 */, | ||
1934 | TILE_OPC_IRET, TILE_OPC_NONE, | ||
1935 | BITFIELD(53, 1) /* index 1257 */, | ||
1936 | CHILD(1260), TILE_OPC_NONE, | ||
1937 | BITFIELD(31, 2) /* index 1260 */, | ||
1938 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(1265), | ||
1939 | BITFIELD(33, 2) /* index 1265 */, | ||
1940 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(1270), | ||
1941 | BITFIELD(35, 2) /* index 1270 */, | ||
1942 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_PREFETCH, | ||
1943 | BITFIELD(53, 1) /* index 1275 */, | ||
1944 | TILE_OPC_LB_U, TILE_OPC_NONE, | ||
1945 | BITFIELD(53, 1) /* index 1278 */, | ||
1946 | TILE_OPC_LH, TILE_OPC_NONE, | ||
1947 | BITFIELD(53, 1) /* index 1281 */, | ||
1948 | TILE_OPC_LH_U, TILE_OPC_NONE, | ||
1949 | BITFIELD(53, 1) /* index 1284 */, | ||
1950 | TILE_OPC_LW, TILE_OPC_NONE, | ||
1951 | BITFIELD(53, 1) /* index 1287 */, | ||
1952 | TILE_OPC_MF, TILE_OPC_NONE, | ||
1953 | BITFIELD(43, 3) /* index 1290 */, | ||
1954 | CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311), | ||
1955 | CHILD(1314), CHILD(1317), CHILD(1320), | ||
1956 | BITFIELD(53, 1) /* index 1299 */, | ||
1957 | TILE_OPC_NAP, TILE_OPC_NONE, | ||
1958 | BITFIELD(53, 1) /* index 1302 */, | ||
1959 | TILE_OPC_NOP, TILE_OPC_NONE, | ||
1960 | BITFIELD(53, 1) /* index 1305 */, | ||
1961 | TILE_OPC_SWINT0, TILE_OPC_NONE, | ||
1962 | BITFIELD(53, 1) /* index 1308 */, | ||
1963 | TILE_OPC_SWINT1, TILE_OPC_NONE, | ||
1964 | BITFIELD(53, 1) /* index 1311 */, | ||
1965 | TILE_OPC_SWINT2, TILE_OPC_NONE, | ||
1966 | BITFIELD(53, 1) /* index 1314 */, | ||
1967 | TILE_OPC_SWINT3, TILE_OPC_NONE, | ||
1968 | BITFIELD(53, 1) /* index 1317 */, | ||
1969 | TILE_OPC_TNS, TILE_OPC_NONE, | ||
1970 | BITFIELD(53, 1) /* index 1320 */, | ||
1971 | TILE_OPC_WH64, TILE_OPC_NONE, | ||
1972 | BITFIELD(43, 2) /* index 1323 */, | ||
1973 | CHILD(1328), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1974 | BITFIELD(45, 1) /* index 1328 */, | ||
1975 | CHILD(1331), TILE_OPC_NONE, | ||
1976 | BITFIELD(53, 1) /* index 1331 */, | ||
1977 | TILE_OPC_LW_NA, TILE_OPC_NONE, | ||
1978 | BITFIELD(46, 7) /* index 1334 */, | ||
1979 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(1463), | ||
1980 | CHILD(1463), CHILD(1463), CHILD(1463), CHILD(1466), CHILD(1466), | ||
1981 | CHILD(1466), CHILD(1466), CHILD(1469), CHILD(1469), CHILD(1469), | ||
1982 | CHILD(1469), CHILD(1472), CHILD(1472), CHILD(1472), CHILD(1472), | ||
1983 | CHILD(1475), CHILD(1475), CHILD(1475), CHILD(1475), CHILD(1478), | ||
1984 | CHILD(1478), CHILD(1478), CHILD(1478), CHILD(1481), CHILD(1481), | ||
1985 | CHILD(1481), CHILD(1481), CHILD(1484), CHILD(1484), CHILD(1484), | ||
1986 | CHILD(1484), CHILD(1487), CHILD(1487), CHILD(1487), CHILD(1487), | ||
1987 | CHILD(1490), CHILD(1490), CHILD(1490), CHILD(1490), CHILD(1151), | ||
1988 | CHILD(1493), CHILD(1517), CHILD(1529), TILE_OPC_NONE, TILE_OPC_NONE, | ||
1989 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1990 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1991 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1992 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1993 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1994 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1995 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1996 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1997 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1998 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
1999 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2000 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2001 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2002 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2003 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2004 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2005 | BITFIELD(53, 1) /* index 1463 */, | ||
2006 | TILE_OPC_RLI_SN, TILE_OPC_NONE, | ||
2007 | BITFIELD(53, 1) /* index 1466 */, | ||
2008 | TILE_OPC_SHLIB_SN, TILE_OPC_NONE, | ||
2009 | BITFIELD(53, 1) /* index 1469 */, | ||
2010 | TILE_OPC_SHLIH_SN, TILE_OPC_NONE, | ||
2011 | BITFIELD(53, 1) /* index 1472 */, | ||
2012 | TILE_OPC_SHLI_SN, TILE_OPC_NONE, | ||
2013 | BITFIELD(53, 1) /* index 1475 */, | ||
2014 | TILE_OPC_SHRIB_SN, TILE_OPC_NONE, | ||
2015 | BITFIELD(53, 1) /* index 1478 */, | ||
2016 | TILE_OPC_SHRIH_SN, TILE_OPC_NONE, | ||
2017 | BITFIELD(53, 1) /* index 1481 */, | ||
2018 | TILE_OPC_SHRI_SN, TILE_OPC_NONE, | ||
2019 | BITFIELD(53, 1) /* index 1484 */, | ||
2020 | TILE_OPC_SRAIB_SN, TILE_OPC_NONE, | ||
2021 | BITFIELD(53, 1) /* index 1487 */, | ||
2022 | TILE_OPC_SRAIH_SN, TILE_OPC_NONE, | ||
2023 | BITFIELD(53, 1) /* index 1490 */, | ||
2024 | TILE_OPC_SRAI_SN, TILE_OPC_NONE, | ||
2025 | BITFIELD(43, 3) /* index 1493 */, | ||
2026 | CHILD(1251), CHILD(1254), CHILD(1502), CHILD(1505), CHILD(1508), | ||
2027 | CHILD(1511), CHILD(1514), CHILD(1287), | ||
2028 | BITFIELD(53, 1) /* index 1502 */, | ||
2029 | TILE_OPC_LB_SN, TILE_OPC_NONE, | ||
2030 | BITFIELD(53, 1) /* index 1505 */, | ||
2031 | TILE_OPC_LB_U_SN, TILE_OPC_NONE, | ||
2032 | BITFIELD(53, 1) /* index 1508 */, | ||
2033 | TILE_OPC_LH_SN, TILE_OPC_NONE, | ||
2034 | BITFIELD(53, 1) /* index 1511 */, | ||
2035 | TILE_OPC_LH_U_SN, TILE_OPC_NONE, | ||
2036 | BITFIELD(53, 1) /* index 1514 */, | ||
2037 | TILE_OPC_LW_SN, TILE_OPC_NONE, | ||
2038 | BITFIELD(43, 3) /* index 1517 */, | ||
2039 | CHILD(1299), CHILD(1302), CHILD(1305), CHILD(1308), CHILD(1311), | ||
2040 | CHILD(1314), CHILD(1526), CHILD(1320), | ||
2041 | BITFIELD(53, 1) /* index 1526 */, | ||
2042 | TILE_OPC_TNS_SN, TILE_OPC_NONE, | ||
2043 | BITFIELD(43, 2) /* index 1529 */, | ||
2044 | CHILD(1534), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2045 | BITFIELD(45, 1) /* index 1534 */, | ||
2046 | CHILD(1537), TILE_OPC_NONE, | ||
2047 | BITFIELD(53, 1) /* index 1537 */, | ||
2048 | TILE_OPC_LW_NA_SN, TILE_OPC_NONE, | ||
2049 | }; | ||
2050 | |||
2051 | static const unsigned short decode_Y0_fsm[168] = | ||
2052 | { | ||
2053 | BITFIELD(27, 4) /* index 0 */, | ||
2054 | TILE_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52), | ||
2055 | CHILD(57), CHILD(62), CHILD(67), TILE_OPC_ADDI, CHILD(72), CHILD(102), | ||
2056 | TILE_OPC_SEQI, CHILD(117), TILE_OPC_SLTI, TILE_OPC_SLTI_U, | ||
2057 | BITFIELD(18, 2) /* index 17 */, | ||
2058 | TILE_OPC_ADD, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_SUB, | ||
2059 | BITFIELD(18, 2) /* index 22 */, | ||
2060 | TILE_OPC_MNZ, TILE_OPC_MVNZ, TILE_OPC_MVZ, TILE_OPC_MZ, | ||
2061 | BITFIELD(18, 2) /* index 27 */, | ||
2062 | TILE_OPC_AND, TILE_OPC_NOR, CHILD(32), TILE_OPC_XOR, | ||
2063 | BITFIELD(12, 2) /* index 32 */, | ||
2064 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(37), | ||
2065 | BITFIELD(14, 2) /* index 37 */, | ||
2066 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(42), | ||
2067 | BITFIELD(16, 2) /* index 42 */, | ||
2068 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
2069 | BITFIELD(18, 2) /* index 47 */, | ||
2070 | TILE_OPC_RL, TILE_OPC_SHL, TILE_OPC_SHR, TILE_OPC_SRA, | ||
2071 | BITFIELD(18, 2) /* index 52 */, | ||
2072 | TILE_OPC_SLTE, TILE_OPC_SLTE_U, TILE_OPC_SLT, TILE_OPC_SLT_U, | ||
2073 | BITFIELD(18, 2) /* index 57 */, | ||
2074 | TILE_OPC_MULHLSA_UU, TILE_OPC_S3A, TILE_OPC_SEQ, TILE_OPC_SNE, | ||
2075 | BITFIELD(18, 2) /* index 62 */, | ||
2076 | TILE_OPC_MULHH_SS, TILE_OPC_MULHH_UU, TILE_OPC_MULLL_SS, TILE_OPC_MULLL_UU, | ||
2077 | BITFIELD(18, 2) /* index 67 */, | ||
2078 | TILE_OPC_MULHHA_SS, TILE_OPC_MULHHA_UU, TILE_OPC_MULLLA_SS, | ||
2079 | TILE_OPC_MULLLA_UU, | ||
2080 | BITFIELD(0, 2) /* index 72 */, | ||
2081 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(77), | ||
2082 | BITFIELD(2, 2) /* index 77 */, | ||
2083 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(82), | ||
2084 | BITFIELD(4, 2) /* index 82 */, | ||
2085 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(87), | ||
2086 | BITFIELD(6, 2) /* index 87 */, | ||
2087 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(92), | ||
2088 | BITFIELD(8, 2) /* index 92 */, | ||
2089 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(97), | ||
2090 | BITFIELD(10, 2) /* index 97 */, | ||
2091 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
2092 | BITFIELD(6, 2) /* index 102 */, | ||
2093 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(107), | ||
2094 | BITFIELD(8, 2) /* index 107 */, | ||
2095 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(112), | ||
2096 | BITFIELD(10, 2) /* index 112 */, | ||
2097 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
2098 | BITFIELD(15, 5) /* index 117 */, | ||
2099 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_RLI, | ||
2100 | TILE_OPC_RLI, TILE_OPC_RLI, TILE_OPC_RLI, TILE_OPC_SHLI, TILE_OPC_SHLI, | ||
2101 | TILE_OPC_SHLI, TILE_OPC_SHLI, TILE_OPC_SHRI, TILE_OPC_SHRI, TILE_OPC_SHRI, | ||
2102 | TILE_OPC_SHRI, TILE_OPC_SRAI, TILE_OPC_SRAI, TILE_OPC_SRAI, TILE_OPC_SRAI, | ||
2103 | CHILD(150), CHILD(159), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2104 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2105 | TILE_OPC_NONE, TILE_OPC_NONE, | ||
2106 | BITFIELD(12, 3) /* index 150 */, | ||
2107 | TILE_OPC_NONE, TILE_OPC_BITX, TILE_OPC_BYTEX, TILE_OPC_CLZ, TILE_OPC_CTZ, | ||
2108 | TILE_OPC_FNOP, TILE_OPC_NOP, TILE_OPC_PCNT, | ||
2109 | BITFIELD(12, 3) /* index 159 */, | ||
2110 | TILE_OPC_TBLIDXB0, TILE_OPC_TBLIDXB1, TILE_OPC_TBLIDXB2, TILE_OPC_TBLIDXB3, | ||
2111 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
2112 | }; | ||
2113 | |||
/*
 * Decode FSM for the Y1 pipeline slot; same node format as
 * decode_Y0_fsm (BITFIELD spec word followed by opcode/CHILD entries).
 * Machine-generated; do not edit entries by hand.
 */
static const unsigned short decode_Y1_fsm[140] =
{
  BITFIELD(59, 4) /* index 0 */,
  TILE_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52),
  CHILD(57), TILE_OPC_ADDI, CHILD(62), CHILD(92), TILE_OPC_SEQI, CHILD(107),
  TILE_OPC_SLTI, TILE_OPC_SLTI_U, TILE_OPC_NONE, TILE_OPC_NONE,
  BITFIELD(49, 2) /* index 17 */,
  TILE_OPC_ADD, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_SUB,
  BITFIELD(49, 2) /* index 22 */,
  TILE_OPC_NONE, TILE_OPC_MNZ, TILE_OPC_MZ, TILE_OPC_NONE,
  BITFIELD(49, 2) /* index 27 */,
  TILE_OPC_AND, TILE_OPC_NOR, CHILD(32), TILE_OPC_XOR,
  BITFIELD(43, 2) /* index 32 */,
  TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(37),
  BITFIELD(45, 2) /* index 37 */,
  TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(42),
  BITFIELD(47, 2) /* index 42 */,
  TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE,
  BITFIELD(49, 2) /* index 47 */,
  TILE_OPC_RL, TILE_OPC_SHL, TILE_OPC_SHR, TILE_OPC_SRA,
  BITFIELD(49, 2) /* index 52 */,
  TILE_OPC_SLTE, TILE_OPC_SLTE_U, TILE_OPC_SLT, TILE_OPC_SLT_U,
  BITFIELD(49, 2) /* index 57 */,
  TILE_OPC_NONE, TILE_OPC_S3A, TILE_OPC_SEQ, TILE_OPC_SNE,
  BITFIELD(31, 2) /* index 62 */,
  TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(67),
  BITFIELD(33, 2) /* index 67 */,
  TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(72),
  BITFIELD(35, 2) /* index 72 */,
  TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(77),
  BITFIELD(37, 2) /* index 77 */,
  TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(82),
  BITFIELD(39, 2) /* index 82 */,
  TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(87),
  BITFIELD(41, 2) /* index 87 */,
  TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO,
  BITFIELD(37, 2) /* index 92 */,
  TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(97),
  BITFIELD(39, 2) /* index 97 */,
  TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(102),
  BITFIELD(41, 2) /* index 102 */,
  TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI,
  BITFIELD(48, 3) /* index 107 */,
  TILE_OPC_NONE, TILE_OPC_RLI, TILE_OPC_SHLI, TILE_OPC_SHRI, TILE_OPC_SRAI,
  CHILD(116), TILE_OPC_NONE, TILE_OPC_NONE,
  BITFIELD(43, 3) /* index 116 */,
  TILE_OPC_NONE, CHILD(125), CHILD(130), CHILD(135), TILE_OPC_NONE,
  TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE,
  BITFIELD(46, 2) /* index 125 */,
  TILE_OPC_FNOP, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE,
  BITFIELD(46, 2) /* index 130 */,
  TILE_OPC_ILL, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE,
  BITFIELD(46, 2) /* index 135 */,
  TILE_OPC_NOP, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE,
};
2169 | |||
/*
 * Decode FSM for the Y2 (memory) pipeline slot; same node format as
 * decode_Y0_fsm.  Machine-generated; do not edit entries by hand.
 */
static const unsigned short decode_Y2_fsm[24] =
{
  BITFIELD(56, 3) /* index 0 */,
  CHILD(9), TILE_OPC_LB_U, TILE_OPC_LH, TILE_OPC_LH_U, TILE_OPC_LW,
  TILE_OPC_SB, TILE_OPC_SH, TILE_OPC_SW,
  BITFIELD(20, 2) /* index 9 */,
  TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(14),
  BITFIELD(22, 2) /* index 14 */,
  TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(19),
  BITFIELD(24, 2) /* index 19 */,
  TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_PREFETCH,
};
2182 | |||
2183 | #undef BITFIELD | ||
2184 | #undef CHILD | ||
/*
 * One decode FSM per pipeline encoding, indexed by the TILE_PIPELINE_*
 * enum; parse_insn_tile() selects the table for each pipe from here.
 */
const unsigned short * const
tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS] =
{
  decode_X0_fsm,
  decode_X1_fsm,
  decode_Y0_fsm,
  decode_Y1_fsm,
  decode_Y2_fsm
};
/*
 * Descriptor table for every operand kind an instruction can carry.
 * Each entry holds the operand type, its BFD relocation, six numeric
 * fields, and the insert/extract accessors used by the encoder and by
 * parse_insn_tile().  From how the fields are used here, the first two
 * numbers are the field width in bits and an is-signed flag; the fifth
 * appears to be a PC-relative flag and the sixth a right-shift scale
 * (set only on TILE_OP_TYPE_ADDRESS entries); the third and fourth
 * look like source/destination register flags -- NOTE(review): confirm
 * the exact field order against the struct tile_operand declaration.
 * Machine-generated; do not edit entries by hand.
 */
const struct tile_operand tile_operands[43] =
{
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM8_X0),
    8, 1, 0, 0, 0, 0,
    create_Imm8_X0, get_Imm8_X0
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM8_X1),
    8, 1, 0, 0, 0, 0,
    create_Imm8_X1, get_Imm8_X1
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM8_Y0),
    8, 1, 0, 0, 0, 0,
    create_Imm8_Y0, get_Imm8_Y0
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM8_Y1),
    8, 1, 0, 0, 0, 0,
    create_Imm8_Y1, get_Imm8_Y1
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM16_X0),
    16, 1, 0, 0, 0, 0,
    create_Imm16_X0, get_Imm16_X0
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_IMM16_X1),
    16, 1, 0, 0, 0, 0,
    create_Imm16_X1, get_Imm16_X1
  },
  {
    TILE_OP_TYPE_ADDRESS, BFD_RELOC(TILE_JOFFLONG_X1),
    29, 1, 0, 0, 1, TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
    create_JOffLong_X1, get_JOffLong_X1
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 0, 1, 0, 0,
    create_Dest_X0, get_Dest_X0
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 0, 0, 0,
    create_SrcA_X0, get_SrcA_X0
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 0, 1, 0, 0,
    create_Dest_X1, get_Dest_X1
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 0, 0, 0,
    create_SrcA_X1, get_SrcA_X1
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 0, 1, 0, 0,
    create_Dest_Y0, get_Dest_Y0
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 0, 0, 0,
    create_SrcA_Y0, get_SrcA_Y0
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 0, 1, 0, 0,
    create_Dest_Y1, get_Dest_Y1
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 0, 0, 0,
    create_SrcA_Y1, get_SrcA_Y1
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 0, 0, 0,
    create_SrcA_Y2, get_SrcA_Y2
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 0, 0, 0,
    create_SrcB_X0, get_SrcB_X0
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 0, 0, 0,
    create_SrcB_X1, get_SrcB_X1
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 0, 0, 0,
    create_SrcB_Y0, get_SrcB_Y0
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 0, 0, 0,
    create_SrcB_Y1, get_SrcB_Y1
  },
  {
    TILE_OP_TYPE_ADDRESS, BFD_RELOC(TILE_BROFF_X1),
    17, 1, 0, 0, 1, TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
    create_BrOff_X1, get_BrOff_X1
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 1, 0, 0,
    create_Dest_X0, get_Dest_X0
  },
  {
    TILE_OP_TYPE_ADDRESS, BFD_RELOC(NONE),
    28, 1, 0, 0, 1, TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES,
    create_JOff_X1, get_JOff_X1
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 0, 1, 0, 0,
    create_SrcBDest_Y2, get_SrcBDest_Y2
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 1, 0, 0,
    create_SrcA_X1, get_SrcA_X1
  },
  {
    TILE_OP_TYPE_SPR, BFD_RELOC(TILE_MF_IMM15_X1),
    15, 0, 0, 0, 0, 0,
    create_MF_Imm15_X1, get_MF_Imm15_X1
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_MMSTART_X0),
    5, 0, 0, 0, 0, 0,
    create_MMStart_X0, get_MMStart_X0
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_MMEND_X0),
    5, 0, 0, 0, 0, 0,
    create_MMEnd_X0, get_MMEnd_X0
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_MMSTART_X1),
    5, 0, 0, 0, 0, 0,
    create_MMStart_X1, get_MMStart_X1
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_MMEND_X1),
    5, 0, 0, 0, 0, 0,
    create_MMEnd_X1, get_MMEnd_X1
  },
  {
    TILE_OP_TYPE_SPR, BFD_RELOC(TILE_MT_IMM15_X1),
    15, 0, 0, 0, 0, 0,
    create_MT_Imm15_X1, get_MT_Imm15_X1
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 1, 0, 0,
    create_Dest_Y0, get_Dest_Y0
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SHAMT_X0),
    5, 0, 0, 0, 0, 0,
    create_ShAmt_X0, get_ShAmt_X0
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SHAMT_X1),
    5, 0, 0, 0, 0, 0,
    create_ShAmt_X1, get_ShAmt_X1
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SHAMT_Y0),
    5, 0, 0, 0, 0, 0,
    create_ShAmt_Y0, get_ShAmt_Y0
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SHAMT_Y1),
    5, 0, 0, 0, 0, 0,
    create_ShAmt_Y1, get_ShAmt_Y1
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    6, 0, 1, 0, 0, 0,
    create_SrcBDest_Y2, get_SrcBDest_Y2
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(NONE),
    8, 1, 0, 0, 0, 0,
    create_Dest_Imm8_X1, get_Dest_Imm8_X1
  },
  {
    TILE_OP_TYPE_ADDRESS, BFD_RELOC(TILE_SN_BROFF),
    10, 1, 0, 0, 1, TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES,
    create_BrOff_SN, get_BrOff_SN
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SN_UIMM8),
    8, 0, 0, 0, 0, 0,
    create_Imm8_SN, get_Imm8_SN
  },
  {
    TILE_OP_TYPE_IMMEDIATE, BFD_RELOC(TILE_SN_IMM8),
    8, 1, 0, 0, 0, 0,
    create_Imm8_SN, get_Imm8_SN
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    2, 0, 0, 1, 0, 0,
    create_Dest_SN, get_Dest_SN
  },
  {
    TILE_OP_TYPE_REGISTER, BFD_RELOC(NONE),
    2, 0, 1, 0, 0, 0,
    create_Src_SN, get_Src_SN
  }
};
2412 | |||
2413 | |||
2414 | |||
2415 | |||
2416 | /* Given a set of bundle bits and the lookup FSM for a specific pipe, | ||
2417 | * returns which instruction the bundle contains in that pipe. | ||
2418 | */ | ||
2419 | static const struct tile_opcode * | ||
2420 | find_opcode(tile_bundle_bits bits, const unsigned short *table) | ||
2421 | { | ||
2422 | int index = 0; | ||
2423 | |||
2424 | while (1) | ||
2425 | { | ||
2426 | unsigned short bitspec = table[index]; | ||
2427 | unsigned int bitfield = | ||
2428 | ((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6); | ||
2429 | |||
2430 | unsigned short next = table[index + 1 + bitfield]; | ||
2431 | if (next <= TILE_OPC_NONE) | ||
2432 | return &tile_opcodes[next]; | ||
2433 | |||
2434 | index = next - TILE_OPC_NONE; | ||
2435 | } | ||
2436 | } | ||
2437 | |||
2438 | |||
2439 | int | ||
2440 | parse_insn_tile(tile_bundle_bits bits, | ||
2441 | unsigned int pc, | ||
2442 | struct tile_decoded_instruction | ||
2443 | decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]) | ||
2444 | { | ||
2445 | int num_instructions = 0; | ||
2446 | int pipe; | ||
2447 | |||
2448 | int min_pipe, max_pipe; | ||
2449 | if ((bits & TILE_BUNDLE_Y_ENCODING_MASK) == 0) | ||
2450 | { | ||
2451 | min_pipe = TILE_PIPELINE_X0; | ||
2452 | max_pipe = TILE_PIPELINE_X1; | ||
2453 | } | ||
2454 | else | ||
2455 | { | ||
2456 | min_pipe = TILE_PIPELINE_Y0; | ||
2457 | max_pipe = TILE_PIPELINE_Y2; | ||
2458 | } | ||
2459 | |||
2460 | /* For each pipe, find an instruction that fits. */ | ||
2461 | for (pipe = min_pipe; pipe <= max_pipe; pipe++) | ||
2462 | { | ||
2463 | const struct tile_opcode *opc; | ||
2464 | struct tile_decoded_instruction *d; | ||
2465 | int i; | ||
2466 | |||
2467 | d = &decoded[num_instructions++]; | ||
2468 | opc = find_opcode (bits, tile_bundle_decoder_fsms[pipe]); | ||
2469 | d->opcode = opc; | ||
2470 | |||
2471 | /* Decode each operand, sign extending, etc. as appropriate. */ | ||
2472 | for (i = 0; i < opc->num_operands; i++) | ||
2473 | { | ||
2474 | const struct tile_operand *op = | ||
2475 | &tile_operands[opc->operands[pipe][i]]; | ||
2476 | int opval = op->extract (bits); | ||
2477 | if (op->is_signed) | ||
2478 | { | ||
2479 | /* Sign-extend the operand. */ | ||
2480 | int shift = (int)((sizeof(int) * 8) - op->num_bits); | ||
2481 | opval = (opval << shift) >> shift; | ||
2482 | } | ||
2483 | |||
2484 | /* Adjust PC-relative scaled branch offsets. */ | ||
2485 | if (op->type == TILE_OP_TYPE_ADDRESS) | ||
2486 | { | ||
2487 | opval *= TILE_BUNDLE_SIZE_IN_BYTES; | ||
2488 | opval += (int)pc; | ||
2489 | } | ||
2490 | |||
2491 | /* Record the final value. */ | ||
2492 | d->operands[i] = op; | ||
2493 | d->operand_values[i] = opval; | ||
2494 | } | ||
2495 | } | ||
2496 | |||
2497 | return num_instructions; | ||
2498 | } | ||
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c new file mode 100644 index 000000000000..b9ab25a889b5 --- /dev/null +++ b/arch/tile/kernel/time.c | |||
@@ -0,0 +1,221 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Support the cycle counter clocksource and tile timer clock event device. | ||
15 | */ | ||
16 | |||
17 | #include <linux/time.h> | ||
18 | #include <linux/timex.h> | ||
19 | #include <linux/clocksource.h> | ||
20 | #include <linux/clockchips.h> | ||
21 | #include <linux/hardirq.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <asm/irq_regs.h> | ||
26 | #include <asm/traps.h> | ||
27 | #include <hv/hypervisor.h> | ||
28 | #include <arch/interrupts.h> | ||
29 | #include <arch/spr_def.h> | ||
30 | |||
31 | |||
32 | /* | ||
33 | * Define the cycle counter clock source. | ||
34 | */ | ||
35 | |||
36 | /* How many cycles per second we are running at. */ | ||
37 | static cycles_t cycles_per_sec __write_once; | ||
38 | |||
39 | /* | ||
40 | * We set up shift and multiply values with a minsec of five seconds, | ||
41 | * since our timer counter counts down 31 bits at a frequency of | ||
42 | * no less than 500 MHz. See @minsec for clocks_calc_mult_shift(). | ||
43 | * We could use a different value for the 64-bit free-running | ||
44 | * cycle counter, but we use the same one for consistency, and since | ||
45 | * we will be reasonably precise with this value anyway. | ||
46 | */ | ||
47 | #define TILE_MINSEC 5 | ||
48 | |||
/* Return the CPU clock rate in cycles/sec, as cached by setup_clock(). */
cycles_t get_clock_rate(void)
{
	return cycles_per_sec;
}
53 | |||
#if CHIP_HAS_SPLIT_CYCLE()
/*
 * Read the free-running 64-bit cycle counter on chips that expose it
 * as two 32-bit SPRs.  The high word is read before and after the low
 * word; if it changed, the low word wrapped between the reads and we
 * must retry until we see a consistent pair.
 */
cycles_t get_cycles(void)
{
	unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
	unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
	unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH);

	while (unlikely(high != high2)) {
		/* Low wrapped; reread low, then re-verify high. */
		low = __insn_mfspr(SPR_CYCLE_LOW);
		high = high2;
		high2 = __insn_mfspr(SPR_CYCLE_HIGH);
	}

	return (((cycles_t)high) << 32) | low;
}
#endif
70 | |||
/* Clocksource ->read callback: adapt get_cycles() to the expected signature. */
static cycles_t clocksource_get_cycles(struct clocksource *cs)
{
	return get_cycles();
}
75 | |||
/* 64-bit free-running cycle counter clocksource; mult/shift set in setup_clock(). */
static struct clocksource cycle_counter_cs = {
	.name = "cycle counter",
	.rating = 300,
	.read = clocksource_get_cycles,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
83 | |||
/*
 * Called very early from setup_arch() to set cycles_per_sec.
 * We initialize it early so we can use it to set up loops_per_jiffy.
 * The CPU speed is obtained from the hypervisor; the clocksource
 * mult/shift are derived from it with a TILE_MINSEC conversion range.
 */
void __init setup_clock(void)
{
	cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
	clocksource_calc_mult_shift(&cycle_counter_cs, cycles_per_sec,
				    TILE_MINSEC);
}
94 | |||
95 | void __init calibrate_delay(void) | ||
96 | { | ||
97 | loops_per_jiffy = get_clock_rate() / HZ; | ||
98 | pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n", | ||
99 | loops_per_jiffy/(500000/HZ), | ||
100 | (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); | ||
101 | } | ||
102 | |||
/* Called fairly late in init/main.c, but before we go smp. */
void __init time_init(void)
{
	/* Initialize and register the clock source. */
	clocksource_register(&cycle_counter_cs);

	/* Start up the tile-timer interrupt source on the boot cpu. */
	/* NOTE(review): presumably secondary cpus run setup_tile_timer()
	 * from SMP bringup code elsewhere — confirm against smpboot path. */
	setup_tile_timer();
}
112 | |||
113 | |||
114 | /* | ||
115 | * Define the tile timer clock event device. The timer is driven by | ||
116 | * the TILE_TIMER_CONTROL register, which consists of a 31-bit down | ||
117 | * counter, plus bit 31, which signifies that the counter has wrapped | ||
118 | * from zero to (2**31) - 1. The INT_TILE_TIMER interrupt will be | ||
119 | * raised as long as bit 31 is set. | ||
120 | */ | ||
121 | |||
122 | #define MAX_TICK 0x7fffffff /* we have 31 bits of countdown timer */ | ||
123 | |||
/*
 * Program the 31-bit down-counter and unmask the timer interrupt.
 * The counter must be written before unmasking, so a stale expired
 * count can't fire the interrupt early.  Always succeeds (returns 0).
 */
static int tile_timer_set_next_event(unsigned long ticks,
				     struct clock_event_device *evt)
{
	BUG_ON(ticks > MAX_TICK);
	__insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
	raw_local_irq_unmask_now(INT_TILE_TIMER);
	return 0;
}
132 | |||
/*
 * Whenever anyone tries to change modes, we just mask interrupts
 * and wait for the next event to get set.  (The mode argument is
 * intentionally ignored: this device is oneshot-only.)
 */
static void tile_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	raw_local_irq_mask_now(INT_TILE_TIMER);
}
142 | |||
/*
 * Per-cpu oneshot clock event device driven by TILE_TIMER_CONTROL.
 * Set min_delta_ns to 1 microsecond, since it takes about
 * that long to fire the interrupt.  mult/shift and max_delta_ns are
 * filled in at runtime by setup_tile_timer().
 */
static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
	.name = "tile timer",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.min_delta_ns = 1000,
	.rating = 100,
	.irq = -1,
	.set_next_event = tile_timer_set_next_event,
	.set_mode = tile_timer_set_mode,
};
156 | |||
/*
 * Initialize and register this cpu's tile-timer clock event device.
 * Runs on each cpu as it comes up (hence __cpuinit).
 */
void __cpuinit setup_tile_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(tile_timer);

	/* Fill in fields that are speed-specific. */
	clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
	evt->max_delta_ns = clockevent_delta2ns(MAX_TICK, evt);

	/* Mark as being for this cpu only. */
	evt->cpumask = cpumask_of(smp_processor_id());

	/* Start out with timer not firing. */
	raw_local_irq_mask_now(INT_TILE_TIMER);

	/* Register tile timer. */
	clockevents_register_device(evt);
}
174 | |||
/* Called from the interrupt vector.  fault_num is unused here. */
void do_timer_interrupt(struct pt_regs *regs, int fault_num)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct clock_event_device *evt = &__get_cpu_var(tile_timer);

	/*
	 * Mask the timer interrupt here, since we are a oneshot timer
	 * and there are now by definition no events pending.
	 */
	raw_local_irq_mask(INT_TILE_TIMER);

	/* Track time spent here in an interrupt context */
	irq_enter();

	/* Track interrupt count. */
	__get_cpu_var(irq_stat).irq_timer_count++;

	/* Call the generic timer handler */
	evt->event_handler(evt);

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();

	set_irq_regs(old_regs);
}
204 | |||
/*
 * Scheduler clock - returns current time in nanosec units.
 * Note that with LOCKDEP, this is called during lockdep_init(), and
 * we will claim that sched_clock() is zero for a little while, until
 * we run setup_clock(), above.
 *
 * NOTE(review): mult/shift are read without synchronization; this
 * relies on them being written once at boot before other cpus run.
 */
unsigned long long sched_clock(void)
{
	return clocksource_cyc2ns(get_cycles(),
				  cycle_counter_cs.mult,
				  cycle_counter_cs.shift);
}
217 | |||
/* Changing the profiling-timer multiplier is not supported on tile. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c new file mode 100644 index 000000000000..2dffc1044d83 --- /dev/null +++ b/arch/tile/kernel/tlb.c | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/cpumask.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <asm/tlbflush.h> | ||
19 | #include <asm/homecache.h> | ||
20 | #include <hv/hypervisor.h> | ||
21 | |||
22 | /* From tlbflush.h */ | ||
23 | DEFINE_PER_CPU(int, current_asid); | ||
24 | int min_asid, max_asid; | ||
25 | |||
26 | /* | ||
27 | * Note that we flush the L1I (for VM_EXEC pages) as well as the TLB | ||
28 | * so that when we are unmapping an executable page, we also flush it. | ||
29 | * Combined with flushing the L1I at context switch time, this means | ||
30 | * we don't have to do any other icache flushes. | ||
31 | */ | ||
32 | |||
/*
 * Flush all TLB entries for this mm on every cpu in its cpu_vm_mask,
 * also evicting the L1I (see comment above).  For each target cpu we
 * build an (x, y, asid) tuple identifying the tile and its current
 * ASID, then issue one hypervisor flush for the whole set.
 *
 * NOTE(review): asids[NR_CPUS] lives on the kernel stack; for large
 * NR_CPUS this is a sizable frame — confirm stack budget.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	HV_Remote_ASID asids[NR_CPUS];
	int i = 0, cpu;
	for_each_cpu(cpu, &mm->cpu_vm_mask) {
		HV_Remote_ASID *asid = &asids[i++];
		asid->y = cpu / smp_topology.width;
		asid->x = cpu % smp_topology.width;
		asid->asid = per_cpu(current_asid, cpu);
	}
	flush_remote(0, HV_FLUSH_EVICT_L1I, &mm->cpu_vm_mask,
		     0, 0, 0, NULL, asids, i);
}
46 | |||
/* Convenience wrapper: flush the TLB for the current task's mm. */
void flush_tlb_current_task(void)
{
	flush_tlb_mm(current->mm);
}
51 | |||
52 | void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm, | ||
53 | unsigned long va) | ||
54 | { | ||
55 | unsigned long size = hv_page_size(vma); | ||
56 | int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; | ||
57 | flush_remote(0, cache, &mm->cpu_vm_mask, | ||
58 | va, size, size, &mm->cpu_vm_mask, NULL, 0); | ||
59 | } | ||
60 | |||
/* Flush one page of vma's own mm; exported for module use. */
void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va)
{
	flush_tlb_page_mm(vma, vma->vm_mm, va);
}
EXPORT_SYMBOL(flush_tlb_page);
66 | |||
67 | void flush_tlb_range(const struct vm_area_struct *vma, | ||
68 | unsigned long start, unsigned long end) | ||
69 | { | ||
70 | unsigned long size = hv_page_size(vma); | ||
71 | struct mm_struct *mm = vma->vm_mm; | ||
72 | int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; | ||
73 | flush_remote(0, cache, &mm->cpu_vm_mask, start, end - start, size, | ||
74 | &mm->cpu_vm_mask, NULL, 0); | ||
75 | } | ||
76 | |||
/*
 * Flush every TLB entry on every online cpu.  We walk the hypervisor's
 * virtual-address ranges until one comes back with size 0, flushing
 * each range twice: once at PAGE_SIZE granularity (with L1I eviction)
 * and once at HPAGE_SIZE granularity for huge-page entries.
 */
void flush_tlb_all(void)
{
	int i;
	for (i = 0; ; ++i) {
		HV_VirtAddrRange r = hv_inquire_virtual(i);
		if (r.size == 0)
			break;
		flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
			     r.start, r.size, PAGE_SIZE, cpu_online_mask,
			     NULL, 0);
		flush_remote(0, 0, NULL,
			     r.start, r.size, HPAGE_SIZE, cpu_online_mask,
			     NULL, 0);
	}
}
92 | |||
/* Flush kernel TLB entries for [start, end) on all online cpus. */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
		     start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
}
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c new file mode 100644 index 000000000000..3870abbeeaa2 --- /dev/null +++ b/arch/tile/kernel/traps.c | |||
@@ -0,0 +1,317 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/kprobes.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/reboot.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <asm/opcode-tile.h> | ||
23 | #include <asm/opcode_constants.h> | ||
24 | #include <asm/stack.h> | ||
25 | #include <asm/traps.h> | ||
26 | |||
27 | #include <arch/interrupts.h> | ||
28 | #include <arch/spr_def.h> | ||
29 | |||
void __init trap_init(void)
{
	/* Nothing needed here since we link code at .intrpt1 */
}
34 | |||
35 | int unaligned_fixup = 1; | ||
36 | |||
37 | static int __init setup_unaligned_fixup(char *str) | ||
38 | { | ||
39 | /* | ||
40 | * Say "=-1" to completely disable it. If you just do "=0", we | ||
41 | * will still parse the instruction, then fire a SIGBUS with | ||
42 | * the correct address from inside the single_step code. | ||
43 | */ | ||
44 | long val; | ||
45 | if (strict_strtol(str, 0, &val) != 0) | ||
46 | return 0; | ||
47 | unaligned_fixup = val; | ||
48 | pr_info("Fixups for unaligned data accesses are %s\n", | ||
49 | unaligned_fixup >= 0 ? | ||
50 | (unaligned_fixup ? "enabled" : "disabled") : | ||
51 | "completely disabled"); | ||
52 | return 1; | ||
53 | } | ||
54 | __setup("unaligned_fixup=", setup_unaligned_fixup); | ||
55 | |||
56 | #if CHIP_HAS_TILE_DMA() | ||
57 | |||
/* Nonzero if the "nodma" boot argument disabled user-space DMA. */
static int dma_disabled;

/* Parse the "nodma" boot argument. */
static int __init nodma(char *str)
{
	pr_info("User-space DMA is disabled\n");
	dma_disabled = 1;
	return 1;
}
__setup("nodma", nodma);
67 | |||
/* How to decode SPR_GPV_REASON */
#define IRET_ERROR (1U << 31)
#define MT_ERROR (1U << 30)
#define MF_ERROR (1U << 29)
#define SPR_INDEX ((1U << 15) - 1)
#define SPR_MPL_SHIFT 9 /* starting bit position for MPL encoded in SPR */

/*
 * See if this GPV is just to notify the kernel of SPR use and we can
 * retry the user instruction after adjusting some MPLs suitably.
 * Returns 1 if the faulting instruction should be retried, 0 if the
 * GPV is a real violation the caller must turn into a signal.
 */
static int retry_gpv(unsigned int gpv_reason)
{
	int mpl;

	/* Bad iret: never retryable. */
	if (gpv_reason & IRET_ERROR)
		return 0;

	/* Anything else must be an mtspr/mfspr protection violation. */
	BUG_ON((gpv_reason & (MT_ERROR|MF_ERROR)) == 0);
	mpl = (gpv_reason & SPR_INDEX) >> SPR_MPL_SHIFT;
	if (mpl == INT_DMA_NOTIFY && !dma_disabled) {
		/* User is turning on DMA. Allow it and retry. */
		printk(KERN_DEBUG "Process %d/%s is now enabled for DMA\n",
		       current->pid, current->comm);
		BUG_ON(current->thread.tile_dma_state.enabled);
		current->thread.tile_dma_state.enabled = 1;
		grant_dma_mpls();
		return 1;
	}

	return 0;
}
100 | |||
101 | #endif /* CHIP_HAS_TILE_DMA() */ | ||
102 | |||
/* Use the arch-appropriate bundle type for instruction words. */
#ifdef __tilegx__
#define bundle_bits tilegx_bundle_bits
#else
#define bundle_bits tile_bundle_bits
#endif

/* C-visible name for the canonical breakpoint bundle emitted below. */
extern bundle_bits bpt_code;

/* One "bpt" instruction bundle in rodata, for comparison in special_ill(). */
asm(".pushsection .rodata.bpt_code,\"a\";"
    ".align 8;"
    "bpt_code: bpt;"
    ".size bpt_code,.-bpt_code;"
    ".popsection");
116 | |||
/*
 * Decide whether an illegal-instruction bundle is one of our special
 * encodings: a breakpoint ("bpt"), or a "raise" bundle that asks the
 * kernel to deliver a specific signal/si_code pair to the process.
 * On a match, store the signal in *sigp and si_code in *codep and
 * return 1; return 0 for a genuine illegal instruction.
 */
static int special_ill(bundle_bits bundle, int *sigp, int *codep)
{
	int sig, code, maxcode;

	/* Breakpoint bundle? */
	if (bundle == bpt_code) {
		*sigp = SIGTRAP;
		*codep = TRAP_BRKPT;
		return 1;
	}

	/* If it's a "raise" bundle, then "ill" must be in pipe X1. */
#ifdef __tilegx__
	if ((bundle & TILEGX_BUNDLE_MODE_MASK) != 0)
		return 0;
	if (get_Opcode_X1(bundle) != UNARY_OPCODE_X1)
		return 0;
	if (get_UnaryOpcodeExtension_X1(bundle) != ILL_UNARY_OPCODE_X1)
		return 0;
#else
	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK)
		return 0;
	if (get_Opcode_X1(bundle) != SHUN_0_OPCODE_X1)
		return 0;
	if (get_UnShOpcodeExtension_X1(bundle) != UN_0_SHUN_0_OPCODE_X1)
		return 0;
	if (get_UnOpcodeExtension_X1(bundle) != ILL_UN_0_SHUN_0_OPCODE_X1)
		return 0;
#endif

	/* Check that the magic distinguishers are set to mean "raise". */
	if (get_Dest_X1(bundle) != 29 || get_SrcA_X1(bundle) != 37)
		return 0;

	/* There must be an "addli zero, zero, VAL" in X0. */
	if (get_Opcode_X0(bundle) != ADDLI_OPCODE_X0)
		return 0;
	if (get_Dest_X0(bundle) != TREG_ZERO)
		return 0;
	if (get_SrcA_X0(bundle) != TREG_ZERO)
		return 0;

	/*
	 * Validate the proposed signal number and si_code value.
	 * Note that we embed these in the static instruction itself
	 * so that we perturb the register state as little as possible
	 * at the time of the actual fault; it's unlikely you'd ever
	 * need to dynamically choose which kind of fault to raise
	 * from user space.
	 */
	sig = get_Imm16_X0(bundle) & 0x3f;	/* signal in imm bits 0-5 */
	switch (sig) {
	case SIGILL:
		maxcode = NSIGILL;
		break;
	case SIGFPE:
		maxcode = NSIGFPE;
		break;
	case SIGSEGV:
		maxcode = NSIGSEGV;
		break;
	case SIGBUS:
		maxcode = NSIGBUS;
		break;
	case SIGTRAP:
		maxcode = NSIGTRAP;
		break;
	default:
		return 0;
	}
	code = (get_Imm16_X0(bundle) >> 6) & 0xf;	/* si_code in bits 6-9 */
	if (code <= 0 || code > maxcode)
		return 0;

	/* Make it the requested signal. */
	*sigp = sig;
	*codep = code | __SI_FAULT;
	return 1;
}
195 | |||
/*
 * Main trap dispatcher, called from the interrupt vectors.  Maps each
 * hardware fault number to a (signal, si_code, address) triple and
 * delivers the signal to the current task, after handling the cases
 * that can be retried or fixed up in the kernel.  "reason" carries
 * fault-specific data (e.g. SPR_GPV_REASON for INT_GPV).
 */
void __kprobes do_trap(struct pt_regs *regs, int fault_num,
		       unsigned long reason)
{
	siginfo_t info = { 0 };
	int signo, code;
	unsigned long address;
	bundle_bits instr;

	/* Re-enable interrupts. */
	local_irq_enable();

	/*
	 * If it hits in kernel mode and we can't fix it up, just exit the
	 * current process and hope for the best.
	 */
	if (!user_mode(regs)) {
		if (fixup_exception(regs))  /* only UNALIGN_DATA in practice */
			return;
		pr_alert("Kernel took bad trap %d at PC %#lx\n",
			 fault_num, regs->pc);
		if (fault_num == INT_GPV)
			pr_alert("GPV_REASON is %#lx\n", reason);
		show_regs(regs);
		do_exit(SIGKILL);  /* FIXME: implement i386 die() */
		return;
	}

	switch (fault_num) {
	case INT_ILL:
		/* Read the faulting bundle to check for bpt/raise encodings. */
		if (copy_from_user(&instr, (void __user *)regs->pc,
				   sizeof(instr))) {
			pr_err("Unreadable instruction for INT_ILL:"
			       " %#lx\n", regs->pc);
			do_exit(SIGKILL);
			return;
		}
		if (!special_ill(instr, &signo, &code)) {
			signo = SIGILL;
			code = ILL_ILLOPC;
		}
		address = regs->pc;
		break;
	case INT_GPV:
#if CHIP_HAS_TILE_DMA()
		/* May just be the user enabling DMA; retry if so. */
		if (retry_gpv(reason))
			return;
#endif
		/*FALLTHROUGH*/
	case INT_UDN_ACCESS:
	case INT_IDN_ACCESS:
#if CHIP_HAS_SN()
	case INT_SN_ACCESS:
#endif
		signo = SIGILL;
		code = ILL_PRVREG;
		address = regs->pc;
		break;
	case INT_SWINT_3:
	case INT_SWINT_2:
	case INT_SWINT_0:
		signo = SIGILL;
		code = ILL_ILLTRP;
		address = regs->pc;
		break;
	case INT_UNALIGN_DATA:
#ifndef __tilegx__  /* FIXME: GX: no single-step yet */
		/*
		 * Unless fixups are completely disabled, let the
		 * single-step machinery handle the access; only fall
		 * through to SIGBUS when we faulted from within the
		 * single-step buffer itself.
		 */
		if (unaligned_fixup >= 0) {
			struct single_step_state *state =
				current_thread_info()->step_state;
			if (!state ||
			    (void __user *)(regs->pc) != state->buffer) {
				single_step_once(regs);
				return;
			}
		}
#endif
		signo = SIGBUS;
		code = BUS_ADRALN;
		address = 0;
		break;
	case INT_DOUBLE_FAULT:
		/*
		 * For double fault, "reason" is actually passed as
		 * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so
		 * we can provide the original fault number rather than
		 * the uninteresting "INT_DOUBLE_FAULT" so the user can
		 * learn what actually struck while PL0 ICS was set.
		 */
		fault_num = reason;
		signo = SIGILL;
		code = ILL_DBLFLT;
		address = regs->pc;
		break;
#ifdef __tilegx__
	case INT_ILL_TRANS:
		signo = SIGSEGV;
		code = SEGV_MAPERR;
		if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
			address = regs->pc;
		else
			address = 0;  /* FIXME: GX: single-step for address */
		break;
#endif
	default:
		panic("Unexpected do_trap interrupt number %d", fault_num);
		return;
	}

	info.si_signo = signo;
	info.si_code = code;
	info.si_addr = (void __user *)address;
	if (signo == SIGILL)
		info.si_trapno = fault_num;
	force_sig_info(signo, &info, current);
}
311 | |||
/* Last-resort handler for a double fault in the kernel: dump and halt. */
void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	_dump_stack(dummy, pc, lr, sp, r52);
	pr_emerg("Double fault: exiting\n");
	machine_halt();
}
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..25fdc0c1839a --- /dev/null +++ b/arch/tile/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,98 @@ | |||
#include <asm-generic/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <hv/hypervisor.h>

/* Text loads starting from the supervisor interrupt vector address. */
#define TEXT_OFFSET MEM_SV_INTRPT

OUTPUT_ARCH(tile)
ENTRY(_start)
jiffies = jiffies_64;

/* Three loadable segments: interrupt vectors, kernel text, and data. */
PHDRS
{
  intrpt1 PT_LOAD ;
  text PT_LOAD ;
  data PT_LOAD ;
}
SECTIONS
{
  /* Text is loaded with a different VA than data; start with text. */
  #undef LOAD_OFFSET
  #define LOAD_OFFSET TEXT_OFFSET

  /* Interrupt vectors */
  .intrpt1 (LOAD_OFFSET) : AT ( 0 )   /* put at the start of physical memory */
  {
    _text = .;
    _stext = .;
    *(.intrpt1)
  } :intrpt1 =0

  /* Hypervisor call vectors */
  #include "hvglue.lds"

  /* Now the real code */
  . = ALIGN(0x20000);
  .text : AT (ADDR(.text) - LOAD_OFFSET) {
    HEAD_TEXT
    SCHED_TEXT
    LOCK_TEXT
    __fix_text_end = .;   /* tile-cpack won't rearrange before this */
    TEXT_TEXT
    *(.text.*)
    *(.coldtext*)
    *(.fixup)
    *(.gnu.warning)
  } :text =0
  _etext = .;

  /* "Init" is divided into two areas with very different virtual addresses. */
  INIT_TEXT_SECTION(PAGE_SIZE)

  /* Now we skip back to PAGE_OFFSET for the data. */
  . = (. - TEXT_OFFSET + PAGE_OFFSET);
  #undef LOAD_OFFSET
  #define LOAD_OFFSET PAGE_OFFSET

  . = ALIGN(PAGE_SIZE);
  VMLINUX_SYMBOL(_sinitdata) = .;
  .init.page : AT (ADDR(.init.page) - LOAD_OFFSET) {
    *(.init.page)
  } :data =0
  INIT_DATA_SECTION(16)
  PERCPU(PAGE_SIZE)
  . = ALIGN(PAGE_SIZE);
  VMLINUX_SYMBOL(_einitdata) = .;

  _sdata = .;   /* Start of data section */

  RO_DATA_SECTION(PAGE_SIZE)

  /* initially writeable, then read-only */
  /* NOTE(review): __w1data_begin is assigned both here and inside the
   * section below; the inner assignment takes effect — confirm intent. */
  . = ALIGN(PAGE_SIZE);
  __w1data_begin = .;
  .w1data : AT(ADDR(.w1data) - LOAD_OFFSET) {
    VMLINUX_SYMBOL(__w1data_begin) = .;
    *(.w1data)
    VMLINUX_SYMBOL(__w1data_end) = .;
  }

  RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)

  _edata = .;

  EXCEPTION_TABLE(L2_CACHE_BYTES)
  NOTES


  BSS_SECTION(8, PAGE_SIZE, 1)
  _end = . ;

  STABS_DEBUG
  DWARF_DEBUG

  DISCARDS
}
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile new file mode 100644 index 000000000000..438af38bc9eb --- /dev/null +++ b/arch/tile/lib/Makefile | |||
@@ -0,0 +1,16 @@ | |||
#
# Makefile for TILE-specific library files..
#

# Common library objects; $(BITS) selects the 32- or 64-bit variant.
lib-y = cacheflush.o checksum.o cpumask.o delay.o \
	mb_incoherent.o uaccess.o \
	memcpy_$(BITS).o memchr_$(BITS).o memmove_$(BITS).o memset_$(BITS).o \
	strchr_$(BITS).o strlen_$(BITS).o

# 32-bit-only helpers (atomic support and the tile64 memcpy).
ifneq ($(CONFIG_TILEGX),y)
lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o
endif

lib-$(CONFIG_SMP) += spinlock_$(BITS).o usercopy_$(BITS).o

# Symbol exports are only needed when modules are enabled.
obj-$(CONFIG_MODULES) += exports.o
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c new file mode 100644 index 000000000000..8040b42a8eea --- /dev/null +++ b/arch/tile/lib/atomic_32.c | |||
@@ -0,0 +1,330 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/cache.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <asm/atomic.h> | ||
21 | #include <asm/futex.h> | ||
22 | #include <arch/chip.h> | ||
23 | |||
24 | /* See <asm/atomic_32.h> */ | ||
25 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
26 | |||
27 | /* | ||
28 | * A block of memory containing locks for atomic ops. Each instance of this | ||
29 | * struct will be homed on a different CPU. | ||
30 | */ | ||
31 | struct atomic_locks_on_cpu { | ||
32 | int lock[ATOMIC_HASH_L2_SIZE]; | ||
33 | } __attribute__((aligned(ATOMIC_HASH_L2_SIZE * 4))); | ||
34 | |||
35 | static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool); | ||
36 | |||
37 | /* The locks we'll use until __init_atomic_per_cpu is called. */ | ||
38 | static struct atomic_locks_on_cpu __initdata initial_atomic_locks; | ||
39 | |||
40 | /* Hash into this vector to get a pointer to lock for the given atomic. */ | ||
41 | struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE] | ||
42 | __write_once = { | ||
43 | [0 ... ATOMIC_HASH_L1_SIZE-1] (&initial_atomic_locks) | ||
44 | }; | ||
45 | |||
46 | #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
47 | |||
48 | /* This page is remapped on startup to be hash-for-home. */ | ||
49 | int atomic_locks[PAGE_SIZE / sizeof(int) /* Only ATOMIC_HASH_SIZE is used */] | ||
50 | __attribute__((aligned(PAGE_SIZE), section(".bss.page_aligned"))); | ||
51 | |||
52 | #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
53 | |||
/*
 * Map an atomic variable's address to the spinlock word that guards it.
 * Returns a pointer into the lock table (two-level on chips with
 * ATOMIC_LOCKS_FOUND_VIA_TABLE(), a single hashed page otherwise).
 */
static inline int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	/*
	 * Hash the page offset, rounded down to an 8-byte boundary,
	 * with crc32, then split the hash into L1/L2 table indices.
	 */
	unsigned long i =
		(unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
	unsigned long n = __insn_crc32_32(0, i);

	/* Grab high bits for L1 index. */
	unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT);
	/* Grab low bits for L2 index. */
	unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1);

	return &atomic_lock_ptr[l1_index]->lock[l2_index];
#else
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
#endif
}
79 | |||
80 | #ifdef CONFIG_SMP | ||
/*
 * Return whether the passed pointer is a valid atomic lock pointer,
 * i.e. whether it lies inside the table(s) that
 * __atomic_hashed_lock() indexes into.  Used only for sanity checks.
 */
static int is_atomic_lock(int *p)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	int i;
	/* Check every per-cpu L2 block referenced by the L1 table. */
	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {

		if (p >= &atomic_lock_ptr[i]->lock[0] &&
		    p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) {
			return 1;
		}
	}
	return 0;
#else
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
#endif
}
98 | |||
/*
 * Release an atomic lock word from the page-fault path, after
 * sanity-checking that it really is one of our lock words and is
 * currently held.  The pointer check must precede the dereference.
 */
void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);	/* must be held (locked value is 1) */
	*irqlock_word = 0;		/* store the unlocked value */
}
105 | |||
106 | #endif /* CONFIG_SMP */ | ||
107 | |||
/*
 * Warm the cache line holding the atomic value, then return the hashed
 * lock word that guards it.  The bare volatile read is deliberate: it
 * exists only for its cache side effect and its value is discarded.
 */
static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}
114 | |||
115 | int _atomic_xchg(atomic_t *v, int n) | ||
116 | { | ||
117 | return __atomic_xchg(&v->counter, __atomic_setup(v), n).val; | ||
118 | } | ||
119 | EXPORT_SYMBOL(_atomic_xchg); | ||
120 | |||
121 | int _atomic_xchg_add(atomic_t *v, int i) | ||
122 | { | ||
123 | return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val; | ||
124 | } | ||
125 | EXPORT_SYMBOL(_atomic_xchg_add); | ||
126 | |||
127 | int _atomic_xchg_add_unless(atomic_t *v, int a, int u) | ||
128 | { | ||
129 | /* | ||
130 | * Note: argument order is switched here since it is easier | ||
131 | * to use the first argument consistently as the "old value" | ||
132 | * in the assembly, as is done for _atomic_cmpxchg(). | ||
133 | */ | ||
134 | return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a) | ||
135 | .val; | ||
136 | } | ||
137 | EXPORT_SYMBOL(_atomic_xchg_add_unless); | ||
138 | |||
139 | int _atomic_cmpxchg(atomic_t *v, int o, int n) | ||
140 | { | ||
141 | return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val; | ||
142 | } | ||
143 | EXPORT_SYMBOL(_atomic_cmpxchg); | ||
144 | |||
145 | unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask) | ||
146 | { | ||
147 | return __atomic_or((int *)p, __atomic_setup(p), mask).val; | ||
148 | } | ||
149 | EXPORT_SYMBOL(_atomic_or); | ||
150 | |||
151 | unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask) | ||
152 | { | ||
153 | return __atomic_andn((int *)p, __atomic_setup(p), mask).val; | ||
154 | } | ||
155 | EXPORT_SYMBOL(_atomic_andn); | ||
156 | |||
157 | unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask) | ||
158 | { | ||
159 | return __atomic_xor((int *)p, __atomic_setup(p), mask).val; | ||
160 | } | ||
161 | EXPORT_SYMBOL(_atomic_xor); | ||
162 | |||
163 | |||
164 | u64 _atomic64_xchg(atomic64_t *v, u64 n) | ||
165 | { | ||
166 | return __atomic64_xchg(&v->counter, __atomic_setup(v), n); | ||
167 | } | ||
168 | EXPORT_SYMBOL(_atomic64_xchg); | ||
169 | |||
170 | u64 _atomic64_xchg_add(atomic64_t *v, u64 i) | ||
171 | { | ||
172 | return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i); | ||
173 | } | ||
174 | EXPORT_SYMBOL(_atomic64_xchg_add); | ||
175 | |||
176 | u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u) | ||
177 | { | ||
178 | /* | ||
179 | * Note: argument order is switched here since it is easier | ||
180 | * to use the first argument consistently as the "old value" | ||
181 | * in the assembly, as is done for _atomic_cmpxchg(). | ||
182 | */ | ||
183 | return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v), | ||
184 | u, a); | ||
185 | } | ||
186 | EXPORT_SYMBOL(_atomic64_xchg_add_unless); | ||
187 | |||
188 | u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) | ||
189 | { | ||
190 | return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n); | ||
191 | } | ||
192 | EXPORT_SYMBOL(_atomic64_cmpxchg); | ||
193 | |||
194 | |||
/*
 * Like __atomic_setup(), but for a user-space address: warm the cache
 * and return the hashed lock word guarding the futex.
 */
static inline int *__futex_setup(int __user *v)
{
	/*
	 * Issue a prefetch to the counter to bring it into cache.
	 * As for __atomic_setup, but we can't do a read into the L1
	 * since it might fault; instead we do a prefetch into the L2.
	 */
	__insn_prefetch(v);
	return __atomic_hashed_lock((int __force *)v);
}
205 | |||
206 | struct __get_user futex_set(int __user *v, int i) | ||
207 | { | ||
208 | return __atomic_xchg((int __force *)v, __futex_setup(v), i); | ||
209 | } | ||
210 | |||
211 | struct __get_user futex_add(int __user *v, int n) | ||
212 | { | ||
213 | return __atomic_xchg_add((int __force *)v, __futex_setup(v), n); | ||
214 | } | ||
215 | |||
216 | struct __get_user futex_or(int __user *v, int n) | ||
217 | { | ||
218 | return __atomic_or((int __force *)v, __futex_setup(v), n); | ||
219 | } | ||
220 | |||
221 | struct __get_user futex_andn(int __user *v, int n) | ||
222 | { | ||
223 | return __atomic_andn((int __force *)v, __futex_setup(v), n); | ||
224 | } | ||
225 | |||
226 | struct __get_user futex_xor(int __user *v, int n) | ||
227 | { | ||
228 | return __atomic_xor((int __force *)v, __futex_setup(v), n); | ||
229 | } | ||
230 | |||
231 | struct __get_user futex_cmpxchg(int __user *v, int o, int n) | ||
232 | { | ||
233 | return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n); | ||
234 | } | ||
235 | |||
/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	/*
	 * An address that fails access_ok() cannot be a user futex
	 * address, so by the reasoning above the fault came from a
	 * kernel-space atomic and is fatal.
	 */
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}
252 | |||
253 | |||
#if CHIP_HAS_CBOX_HOME_MAP()
/*
 * Legacy "noatomichash" boot argument: still accepted for
 * compatibility but no longer does anything except warn.
 */
static int __init noatomichash(char *str)
{
	pr_warning("noatomichash is deprecated.\n");
	return 1;	/* returning 1 marks the option as consumed */
}
__setup("noatomichash", noatomichash);
#endif
262 | |||
/*
 * One-time boot initialization of the atomic-lock machinery.  On chips
 * with a lock table, repoint the table entries at per-cpu lock blocks;
 * on TILEPro, just validate the compile-time assumptions behind the
 * single hash-for-home lock page declared above.
 */
void __init __init_atomic_per_cpu(void)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

	unsigned int i;
	int actual_cpu;

	/*
	 * Before this is called from setup, we just have one lock for
	 * all atomic objects/operations.  Here we replace the
	 * elements of atomic_lock_ptr so that they point at per_cpu
	 * integers.  This seemingly over-complex approach stems from
	 * the fact that DEFINE_PER_CPU defines an entry for each cpu
	 * in the grid, not each cpu from 0..ATOMIC_HASH_SIZE-1.  But
	 * for efficient hashing of atomics to their locks we want a
	 * compile time constant power of 2 for the size of this
	 * table, so we use ATOMIC_HASH_SIZE.
	 *
	 * Here we populate atomic_lock_ptr from the per cpu
	 * atomic_lock_pool, interspersing by actual cpu so that
	 * subsequent elements are homed on consecutive cpus.
	 */

	actual_cpu = cpumask_first(cpu_possible_mask);

	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		/*
		 * Preincrement to slightly bias against using cpu 0,
		 * which has plenty of stuff homed on it already.
		 */
		actual_cpu = cpumask_next(actual_cpu, cpu_possible_mask);
		if (actual_cpu >= nr_cpu_ids)
			actual_cpu = cpumask_first(cpu_possible_mask);

		atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu);
	}

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* Validate power-of-two and "bigger than cpus" assumption */
	BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* The futex code makes this assumption, so we validate it here. */
	BUG_ON(sizeof(atomic_t) != sizeof(int));
}
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S new file mode 100644 index 000000000000..5a5514b77e78 --- /dev/null +++ b/arch/tile/lib/atomic_asm_32.S | |||
@@ -0,0 +1,196 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Support routines for atomic operations. Each function takes: | ||
15 | * | ||
16 | * r0: address to manipulate | ||
17 | * r1: pointer to atomic lock guarding this operation (for FUTEX_LOCK_REG) | ||
18 | * r2: new value to write, or for cmpxchg/add_unless, value to compare against | ||
19 | * r3: (cmpxchg/xchg_add_unless) new value to write or add; | ||
20 | * (atomic64 ops) high word of value to write | ||
21 | * r4/r5: (cmpxchg64/add_unless64) new value to write or add | ||
22 | * | ||
23 | * The 32-bit routines return a "struct __get_user" so that the futex code | ||
24 | * has an opportunity to return -EFAULT to the user if needed. | ||
25 | * The 64-bit routines just return a "long long" with the value, | ||
26 | * since they are only used from kernel space and don't expect to fault. | ||
27 | * Support for 16-bit ops is included in the framework but we don't provide | ||
28 | * any (x86_64 has an atomic_inc_short(), so we might want to some day). | ||
29 | * | ||
30 | * Note that the caller is advised to issue a suitable L1 or L2 | ||
31 | * prefetch on the address being manipulated to avoid extra stalls. | ||
32 | * In addition, the hot path is on two icache lines, and we start with | ||
33 | * a jump to the second line to make sure they are both in cache so | ||
34 | * that we never stall waiting on icache fill while holding the lock. | ||
35 | * (This doesn't work out with most 64-bit ops, since they consume | ||
36 | * too many bundles, so may take an extra i-cache stall.) | ||
37 | * | ||
38 | * These routines set the INTERRUPT_CRITICAL_SECTION bit, just | ||
39 | * like sys_cmpxchg(), so that NMIs like PERF_COUNT will not interrupt | ||
40 | * the code, just page faults. | ||
41 | * | ||
42 | * If the load or store faults in a way that can be directly fixed in | ||
43 | * the do_page_fault_ics() handler (e.g. a vmalloc reference) we fix it | ||
44 | * directly, return to the instruction that faulted, and retry it. | ||
45 | * | ||
46 | * If the load or store faults in a way that potentially requires us | ||
47 | * to release the atomic lock, then retry (e.g. a migrating PTE), we | ||
48 | * reset the PC in do_page_fault_ics() to the "tns" instruction so | ||
49 | * that on return we will reacquire the lock and restart the op. We | ||
50 | * are somewhat overloading the exception_table_entry notion by doing | ||
51 | * this, since those entries are not normally used for migrating PTEs. | ||
52 | * | ||
53 | * If the main page fault handler discovers a bad address, it will see | ||
54 | * the PC pointing to the "tns" instruction (due to the earlier | ||
55 | * exception_table_entry processing in do_page_fault_ics), and | ||
56 | * re-reset the PC to the fault handler, atomic_bad_address(), which | ||
57 | * effectively takes over from the atomic op and can either return a | ||
58 | * bad "struct __get_user" (for user addresses) or can just panic (for | ||
59 | * bad kernel addresses). | ||
60 | * | ||
61 | * Note that if the value we would store is the same as what we | ||
62 | * loaded, we bypass the load. Other platforms with true atomics can | ||
63 | * make the guarantee that a non-atomic __clear_bit(), for example, | ||
64 | * can safely race with an atomic test_and_set_bit(); this example is | ||
65 | * from bit_spinlock.h in slub_lock() / slub_unlock(). We can't do | ||
66 | * that on Tile since the "atomic" op is really just a | ||
67 | * read/modify/write, and can race with the non-atomic | ||
68 | * read/modify/write. However, if we can short-circuit the write when | ||
69 | * it is not needed, in the atomic case, we avoid the race. | ||
70 | */ | ||
71 | |||
72 | #include <linux/linkage.h> | ||
73 | #include <asm/atomic.h> | ||
74 | #include <asm/page.h> | ||
75 | #include <asm/processor.h> | ||
76 | |||
77 | .section .text.atomic,"ax" | ||
78 | ENTRY(__start_atomic_asm_code) | ||
79 | |||
/*
 * atomic_op: emit one atomic support routine.
 *   name     - suffix for the generated __atomic\name symbol
 *   bitwidth - 16, 32, or 64
 *   body     - instructions computing the new value into r24 (and r25
 *              for 64-bit ops) from the old value loaded into r22/r23
 * See the file header comment for the lock/backoff/fault protocol.
 */
	.macro  atomic_op, name, bitwidth, body
	.align  64
STD_ENTRY_SECTION(__atomic\name, .text.atomic)
	{
	 movei  r24, 1			/* value for INTERRUPT_CRITICAL_SECTION */
	 j      4f		/* branch to second cache line */
	}
1:	{
	 .ifc \bitwidth,16
	 lh     r22, r0
	 .else
	 lw     r22, r0
	 addi   r28, r0, 4		/* address of the high word */
	 .endif
	}
	.ifc \bitwidth,64
	lw      r23, r28
	.endif
	\body /* set r24, and r25 if 64-bit */
	{
	 seq    r26, r22, r24		/* new value == old value? */
	 seq    r27, r23, r25
	}
	.ifc \bitwidth,64
	bbnst   r27, 2f
	.endif
	bbs     r26, 3f		/* skip write-back if it's the same value */
2:	{
	 .ifc \bitwidth,16
	 sh     r0, r24
	 .else
	 sw     r0, r24
	 .endif
	}
	.ifc \bitwidth,64
	sw      r28, r25
	.endif
	mf				/* memory fence before dropping the lock */
3:	{
	 move   r0, r22			/* return the old value */
	 .ifc \bitwidth,64
	 move   r1, r23
	 .else
	 move   r1, zero		/* no fault: .err field is zero */
	 .endif
	 sw     ATOMIC_LOCK_REG_NAME, zero	/* release the lock */
	}
	mtspr   INTERRUPT_CRITICAL_SECTION, zero
	jrp     lr
4:	{
	 move   ATOMIC_LOCK_REG_NAME, r1
	 mtspr  INTERRUPT_CRITICAL_SECTION, r24
	}
#ifndef CONFIG_SMP
	j       1b		/* no atomic locks */
#else
	{
	 tns    r21, ATOMIC_LOCK_REG_NAME
	 moveli r23, 2048       /* maximum backoff time in cycles */
	}
	{
	 bzt    r21, 1b		/* branch if lock acquired */
	 moveli r25, 32         /* starting backoff time in cycles */
	}
5:	mtspr   INTERRUPT_CRITICAL_SECTION, zero
	mfspr   r26, CYCLE_LOW  /* get start point for this backoff */
6:	mfspr   r22, CYCLE_LOW  /* test to see if we've backed off enough */
	sub     r22, r22, r26
	slt     r22, r22, r25
	bbst    r22, 6b
	{
	 mtspr  INTERRUPT_CRITICAL_SECTION, r24
	 shli   r25, r25, 1     /* double the backoff; retry the tns */
	}
	{
	 tns    r21, ATOMIC_LOCK_REG_NAME
	 slt    r26, r23, r25   /* is the proposed backoff too big? */
	}
	{
	 bzt    r21, 1b		/* branch if lock acquired */
	 mvnz   r25, r26, r23	/* clamp backoff to the 2048-cycle maximum */
	}
	j       5b
#endif
	STD_ENDPROC(__atomic\name)
	.ifc \bitwidth,32
	/* Fixup entries: see the file header for how these are overloaded. */
	.pushsection __ex_table,"a"
	.word   1b, __atomic\name
	.word   2b, __atomic\name
	.word   __atomic\name, __atomic_bad_address
	.popsection
	.endif
	.endm
173 | |||
174 | atomic_op _cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }" | ||
175 | atomic_op _xchg, 32, "move r24, r2" | ||
176 | atomic_op _xchg_add, 32, "add r24, r22, r2" | ||
177 | atomic_op _xchg_add_unless, 32, \ | ||
178 | "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }" | ||
179 | atomic_op _or, 32, "or r24, r22, r2" | ||
180 | atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2" | ||
181 | atomic_op _xor, 32, "xor r24, r22, r2" | ||
182 | |||
183 | atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \ | ||
184 | { bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }" | ||
185 | atomic_op 64_xchg, 64, "{ move r24, r2; move r25, r3 }" | ||
186 | atomic_op 64_xchg_add, 64, "{ add r24, r22, r2; add r25, r23, r3 }; \ | ||
187 | slt_u r26, r24, r22; add r25, r25, r26" | ||
188 | atomic_op 64_xchg_add_unless, 64, \ | ||
189 | "{ sne r26, r22, r2; sne r27, r23, r3 }; \ | ||
190 | { bbns r26, 3f; add r24, r22, r4 }; \ | ||
191 | { bbns r27, 3f; add r25, r23, r5 }; \ | ||
192 | slt_u r26, r24, r22; add r25, r25, r26" | ||
193 | |||
194 | jrp lr /* happy backtracer */ | ||
195 | |||
196 | ENTRY(__end_atomic_asm_code) | ||
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c new file mode 100644 index 000000000000..11b6164c2097 --- /dev/null +++ b/arch/tile/lib/cacheflush.c | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <asm/page.h> | ||
16 | #include <asm/cacheflush.h> | ||
17 | #include <arch/icache.h> | ||
18 | |||
19 | |||
/*
 * Invalidate the instruction cache for the kernel VA range
 * [start, end).  NOTE(review): PAGE_SIZE is passed through to
 * invalidate_icache() as its third argument — presumably a stride or
 * page-size hint; confirm against <arch/icache.h>.
 */
void __flush_icache_range(unsigned long start, unsigned long end)
{
	invalidate_icache((const void *)start, end - start, PAGE_SIZE);
}
diff --git a/arch/tile/lib/checksum.c b/arch/tile/lib/checksum.c new file mode 100644 index 000000000000..e4bab5bd3f31 --- /dev/null +++ b/arch/tile/lib/checksum.c | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * Support code for the main lib/checksum.c. | ||
14 | */ | ||
15 | |||
16 | #include <net/checksum.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
/*
 * Fold a full-register-width checksum accumulator down to 16 bits.
 * Two applications of the unsigned halfword sum-of-absolute-differences
 * intrinsic against zero reduce all 16-bit lanes (first pass) and then
 * the resulting carries (second pass) into a single halfword.
 */
static inline unsigned int longto16(unsigned long x)
{
	unsigned long ret;
#ifdef __tilegx__
	ret = __insn_v2sadu(x, 0);
	ret = __insn_v2sadu(ret, 0);
#else
	ret = __insn_sadh_u(x, 0);
	ret = __insn_sadh_u(ret, 0);
#endif
	return ret;
}
31 | |||
/*
 * Compute the raw checksum of a buffer for the main lib/checksum.c.
 * Aligns up to word granularity, accumulates 16-bit lanes with the
 * chip's SAD-accumulate intrinsics, then folds to 16 bits; the result
 * is byte-swapped if the buffer started on an odd address so the
 * running sum stays byte-order invariant.
 */
__wsum do_csum(const unsigned char *buff, int len)
{
	int odd, count;
	unsigned long result = 0;

	if (len <= 0)
		goto out;
	/* Leading byte on an odd address goes into the high lane;
	 * the final swab16 compensates. */
	odd = 1 & (unsigned long) buff;
	if (odd) {
		result = (*buff << 8);
		len--;
		buff++;
	}
	count = len >> 1;		/* nr of 16-bit words.. */
	if (count) {
		/* Align to a 4-byte boundary. */
		if (2 & (unsigned long) buff) {
			result += *(const unsigned short *)buff;
			count--;
			len -= 2;
			buff += 2;
		}
		count >>= 1;		/* nr of 32-bit words.. */
		if (count) {
#ifdef __tilegx__
			/* On 64-bit, also align to an 8-byte boundary. */
			if (4 & (unsigned long) buff) {
				unsigned int w = *(const unsigned int *)buff;
				result = __insn_v2sadau(result, w, 0);
				count--;
				len -= 4;
				buff += 4;
			}
			count >>= 1;	/* nr of 64-bit words.. */
#endif

			/*
			 * This algorithm could wrap around for very
			 * large buffers, but those should be impossible.
			 */
			BUG_ON(count >= 65530);

			/* Main loop: accumulate 16-bit lanes of each
			 * aligned word (SAD against zero == lane sum). */
			while (count) {
				unsigned long w = *(const unsigned long *)buff;
				count--;
				buff += sizeof(w);
#ifdef __tilegx__
				result = __insn_v2sadau(result, w, 0);
#else
				result = __insn_sadah_u(result, w, 0);
#endif
			}
#ifdef __tilegx__
			/* Trailing 32-bit word on 64-bit chips. */
			if (len & 4) {
				unsigned int w = *(const unsigned int *)buff;
				result = __insn_v2sadau(result, w, 0);
				buff += 4;
			}
#endif
		}
		/* Trailing 16-bit word. */
		if (len & 2) {
			result += *(const unsigned short *) buff;
			buff += 2;
		}
	}
	if (len & 1)
		result += *buff;	/* trailing byte, low lane */
	result = longto16(result);	/* fold the wide sum to 16 bits */
	if (odd)
		result = swab16(result);
out:
	return result;
}
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c new file mode 100644 index 000000000000..fdc403614d12 --- /dev/null +++ b/arch/tile/lib/cpumask.c | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/cpumask.h> | ||
16 | #include <linux/ctype.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/smp.h> | ||
19 | |||
20 | /* | ||
21 | * Allow cropping out bits beyond the end of the array. | ||
22 | * Move to "lib" directory if more clients want to use this routine. | ||
23 | */ | ||
24 | int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits) | ||
25 | { | ||
26 | unsigned a, b; | ||
27 | |||
28 | bitmap_zero(maskp, nmaskbits); | ||
29 | do { | ||
30 | if (!isdigit(*bp)) | ||
31 | return -EINVAL; | ||
32 | a = simple_strtoul(bp, (char **)&bp, 10); | ||
33 | b = a; | ||
34 | if (*bp == '-') { | ||
35 | bp++; | ||
36 | if (!isdigit(*bp)) | ||
37 | return -EINVAL; | ||
38 | b = simple_strtoul(bp, (char **)&bp, 10); | ||
39 | } | ||
40 | if (!(a <= b)) | ||
41 | return -EINVAL; | ||
42 | if (b >= nmaskbits) | ||
43 | b = nmaskbits-1; | ||
44 | while (a <= b) { | ||
45 | set_bit(a, maskp); | ||
46 | a++; | ||
47 | } | ||
48 | if (*bp == ',') | ||
49 | bp++; | ||
50 | } while (*bp != '\0' && *bp != '\n'); | ||
51 | return 0; | ||
52 | } | ||
diff --git a/arch/tile/lib/delay.c b/arch/tile/lib/delay.c new file mode 100644 index 000000000000..5801b03c13ef --- /dev/null +++ b/arch/tile/lib/delay.c | |||
@@ -0,0 +1,34 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/thread_info.h> | ||
18 | #include <asm/fixmap.h> | ||
19 | #include <hv/hypervisor.h> | ||
20 | |||
/*
 * Delay for at least the given number of microseconds by delegating
 * to the hypervisor's nanosleep service (which takes nanoseconds).
 */
void __udelay(unsigned long usecs)
{
	hv_nanosleep(usecs * 1000);	/* microseconds -> nanoseconds */
}
EXPORT_SYMBOL(__udelay);
26 | |||
/*
 * Delay for at least the given number of nanoseconds, again via the
 * hypervisor's nanosleep service.
 */
void __ndelay(unsigned long nsecs)
{
	hv_nanosleep(nsecs);
}
EXPORT_SYMBOL(__ndelay);
32 | |||
33 | /* FIXME: should be declared in a header somewhere. */ | ||
34 | EXPORT_SYMBOL(__delay); | ||
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c new file mode 100644 index 000000000000..6bc7b52b4aa0 --- /dev/null +++ b/arch/tile/lib/exports.c | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Exports from assembler code and from libtile-cc. | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | |||
19 | /* arch/tile/lib/usercopy.S */ | ||
20 | #include <linux/uaccess.h> | ||
21 | EXPORT_SYMBOL(__get_user_1); | ||
22 | EXPORT_SYMBOL(__get_user_2); | ||
23 | EXPORT_SYMBOL(__get_user_4); | ||
24 | EXPORT_SYMBOL(__get_user_8); | ||
25 | EXPORT_SYMBOL(__put_user_1); | ||
26 | EXPORT_SYMBOL(__put_user_2); | ||
27 | EXPORT_SYMBOL(__put_user_4); | ||
28 | EXPORT_SYMBOL(__put_user_8); | ||
29 | EXPORT_SYMBOL(strnlen_user_asm); | ||
30 | EXPORT_SYMBOL(strncpy_from_user_asm); | ||
31 | EXPORT_SYMBOL(clear_user_asm); | ||
32 | |||
33 | /* arch/tile/kernel/entry.S */ | ||
34 | #include <linux/kernel.h> | ||
35 | #include <asm/processor.h> | ||
36 | EXPORT_SYMBOL(current_text_addr); | ||
37 | EXPORT_SYMBOL(dump_stack); | ||
38 | |||
39 | /* arch/tile/lib/__memcpy.S */ | ||
40 | /* NOTE: on TILE64, these symbols appear in arch/tile/lib/memcpy_tile64.c */ | ||
41 | EXPORT_SYMBOL(memcpy); | ||
42 | EXPORT_SYMBOL(__copy_to_user_inatomic); | ||
43 | EXPORT_SYMBOL(__copy_from_user_inatomic); | ||
44 | EXPORT_SYMBOL(__copy_from_user_zeroing); | ||
45 | |||
46 | /* hypervisor glue */ | ||
47 | #include <hv/hypervisor.h> | ||
48 | EXPORT_SYMBOL(hv_dev_open); | ||
49 | EXPORT_SYMBOL(hv_dev_pread); | ||
50 | EXPORT_SYMBOL(hv_dev_pwrite); | ||
51 | EXPORT_SYMBOL(hv_dev_close); | ||
52 | |||
53 | /* -ltile-cc */ | ||
54 | uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); | ||
55 | EXPORT_SYMBOL(__udivsi3); | ||
56 | int32_t __divsi3(int32_t dividend, int32_t divisor); | ||
57 | EXPORT_SYMBOL(__divsi3); | ||
58 | uint64_t __udivdi3(uint64_t dividend, uint64_t divisor); | ||
59 | EXPORT_SYMBOL(__udivdi3); | ||
60 | int64_t __divdi3(int64_t dividend, int64_t divisor); | ||
61 | EXPORT_SYMBOL(__divdi3); | ||
62 | uint32_t __umodsi3(uint32_t dividend, uint32_t divisor); | ||
63 | EXPORT_SYMBOL(__umodsi3); | ||
64 | int32_t __modsi3(int32_t dividend, int32_t divisor); | ||
65 | EXPORT_SYMBOL(__modsi3); | ||
66 | uint64_t __umoddi3(uint64_t dividend, uint64_t divisor); | ||
67 | EXPORT_SYMBOL(__umoddi3); | ||
68 | int64_t __moddi3(int64_t dividend, int64_t divisor); | ||
69 | EXPORT_SYMBOL(__moddi3); | ||
70 | #ifndef __tilegx__ | ||
71 | uint64_t __ll_mul(uint64_t n0, uint64_t n1); | ||
72 | EXPORT_SYMBOL(__ll_mul); | ||
73 | #endif | ||
74 | #ifndef __tilegx__ | ||
75 | int64_t __muldi3(int64_t, int64_t); | ||
76 | EXPORT_SYMBOL(__muldi3); | ||
77 | uint64_t __lshrdi3(uint64_t, unsigned int); | ||
78 | EXPORT_SYMBOL(__lshrdi3); | ||
79 | #endif | ||
diff --git a/arch/tile/lib/mb_incoherent.S b/arch/tile/lib/mb_incoherent.S new file mode 100644 index 000000000000..989ad7b68d5a --- /dev/null +++ b/arch/tile/lib/mb_incoherent.S | |||
@@ -0,0 +1,34 @@ | |||
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Assembly code for invoking the HV's fence_incoherent syscall.
 */

#include <linux/linkage.h>
#include <hv/syscall_public.h>
#include <arch/abi.h>
#include <arch/chip.h>

/* Only needed on chips where a memory fence ("mf") does not already
 * wait for cacheline victims to reach memory.
 */
#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()

/*
 * Invoke the hypervisor's fence_incoherent syscall, which guarantees
 * that all victims for cachelines homed on this tile have reached memory.
 */
STD_ENTRY(__mb_incoherent)
	/* Load the syscall number, trap into the hypervisor (swint2),
	 * then return to the caller.
	 */
	moveli TREG_SYSCALL_NR_NAME, HV_SYS_fence_incoherent
	swint2
	jrp lr
	STD_ENDPROC(__mb_incoherent)

#endif
diff --git a/arch/tile/lib/memchr_32.c b/arch/tile/lib/memchr_32.c new file mode 100644 index 000000000000..6235283b4859 --- /dev/null +++ b/arch/tile/lib/memchr_32.c | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
/*
 * memchr() - scan memory for a byte, one aligned word at a time.
 * @s: start of the region to scan.
 * @c: byte value to look for (converted to unsigned char).
 * @n: number of bytes to scan.
 *
 * Returns a pointer to the first occurrence of @c in the first @n
 * bytes of @s, or NULL if the byte does not occur.
 *
 * Uses the tile SIMD-within-a-register byte-compare (__insn_seqb) to
 * test four bytes per iteration, and __insn_ctz to locate the first
 * matching byte within the word.
 */
void *memchr(const void *s, int c, size_t n)
{
	const uint32_t *last_word_ptr;
	const uint32_t *p;
	const char *last_byte_ptr;
	uintptr_t s_int;
	uint32_t goal, before_mask, v, bits;
	char *ret;

	if (__builtin_expect(n == 0, 0)) {
		/* Don't dereference any memory if the array is empty.
		 * This check must precede the first load of *p below.
		 */
		return NULL;
	}

	/* Get an aligned pointer. */
	s_int = (uintptr_t) s;
	p = (const uint32_t *)(s_int & -4);

	/* Create four copies of the byte for which we are looking. */
	goal = 0x01010101 * (uint8_t) c;

	/* Read the first word, but munge it so that bytes before the array
	 * will not match goal.
	 *
	 * Note: using "(s_int & 3) << 3" keeps the shift count in [0, 24],
	 * so this is well-defined C and does not rely on the hardware
	 * truncating shift counts mod 32.
	 */
	before_mask = (1 << ((s_int & 3) << 3)) - 1;
	v = (*p | before_mask) ^ (goal & before_mask);

	/* Compute the address of the last byte. */
	last_byte_ptr = (const char *)s + n - 1;

	/* Compute the address of the word containing the last byte. */
	last_word_ptr = (const uint32_t *)((uintptr_t) last_byte_ptr & -4);

	while ((bits = __insn_seqb(v, goal)) == 0) {
		if (__builtin_expect(p == last_word_ptr, 0)) {
			/* We already read the last word in the array,
			 * so give up.
			 */
			return NULL;
		}
		v = *++p;
	}

	/* We found a match, but it might be in a byte past the end
	 * of the array.
	 */
	ret = ((char *)p) + (__insn_ctz(bits) >> 3);
	return (ret <= last_byte_ptr) ? ret : NULL;
}
EXPORT_SYMBOL(memchr);
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S new file mode 100644 index 000000000000..f92984bf60ec --- /dev/null +++ b/arch/tile/lib/memcpy_32.S | |||
@@ -0,0 +1,628 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This file shares the implementation of the userspace memcpy and | ||
15 | * the kernel's memcpy, copy_to_user and copy_from_user. | ||
16 | */ | ||
17 | |||
#include <arch/chip.h>

/* Use the wh64 (write-hint whole cache line) instruction when the chip
 * supports it, or when explicitly testing that code path.
 */
#if CHIP_HAS_WH64() || defined(MEMCPY_TEST_WH64)
#define MEMCPY_USE_WH64
#endif


#include <linux/linkage.h>

/* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */
#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
#define memcpy __memcpy_asm
#define __copy_to_user_inatomic __copy_to_user_inatomic_asm
#define __copy_from_user_inatomic __copy_from_user_inatomic_asm
#define __copy_from_user_zeroing __copy_from_user_zeroing_asm
#endif

/* Values passed in r29 to tell memcpy_common (and its fixup handler)
 * which entry point was used.  The fixup code dispatches on the sign,
 * zero-ness, and low bit of r29, so these exact values matter.
 */
#define IS_MEMCPY 0
#define IS_COPY_FROM_USER 1
#define IS_COPY_FROM_USER_ZEROING 2
#define IS_COPY_TO_USER -1

	.section .text.memcpy_common, "ax"
	.align 64

/* Use this to preface each bundle that can cause an exception so
 * the kernel can clean up properly. The special cleanup code should
 * not use these, since it knows what it is doing.
 */
#define EX \
	.pushsection __ex_table, "a"; \
	.word 9f, memcpy_common_fixup; \
	.popsection; \
	9
52 | |||
53 | |||
/* __copy_from_user_inatomic takes the kernel target address in r0,
 * the user source in r1, and the bytes to copy in r2.
 * It returns the number of uncopiable bytes (hopefully zero) in r0.
 */
ENTRY(__copy_from_user_inatomic)
	.type __copy_from_user_inatomic, @function
	FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \
	  .text.memcpy_common, \
	  .Lend_memcpy_common - __copy_from_user_inatomic)
	/* Tag the request in r29 and jump to the shared implementation. */
	{ movei r29, IS_COPY_FROM_USER; j memcpy_common }
	.size __copy_from_user_inatomic, . - __copy_from_user_inatomic
65 | |||
/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but
 * any uncopiable bytes are zeroed in the target.
 */
ENTRY(__copy_from_user_zeroing)
	.type __copy_from_user_zeroing, @function
	FEEDBACK_REENTER(__copy_from_user_inatomic)
	/* Tag the request in r29 and jump to the shared implementation. */
	{ movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common }
	.size __copy_from_user_zeroing, . - __copy_from_user_zeroing
74 | |||
/* __copy_to_user_inatomic takes the user target address in r0,
 * the kernel source in r1, and the bytes to copy in r2.
 * It returns the number of uncopiable bytes (hopefully zero) in r0.
 */
ENTRY(__copy_to_user_inatomic)
	.type __copy_to_user_inatomic, @function
	FEEDBACK_REENTER(__copy_from_user_inatomic)
	/* Tag the request in r29 and jump to the shared implementation. */
	{ movei r29, IS_COPY_TO_USER; j memcpy_common }
	.size __copy_to_user_inatomic, . - __copy_to_user_inatomic
84 | |||
ENTRY(memcpy)
	.type memcpy, @function
	FEEDBACK_REENTER(__copy_from_user_inatomic)
	/* Tag the request in r29; memcpy_common follows immediately,
	 * so no jump is needed here.
	 */
	{ movei r29, IS_MEMCPY }
	.size memcpy, . - memcpy
	/* Fall through */
91 | |||
	.type memcpy_common, @function
memcpy_common:
	/* On entry, r29 holds one of the IS_* macro values from above. */


	/* r0 is the dest, r1 is the source, r2 is the size.
	 * Saved state used by the fixup handler: r23 = original dest,
	 * r24 = original source, r25 = original size, r27 = saved lr.
	 */

	/* Save aside original dest so we can return it at the end. */
	{ sw sp, lr; move r23, r0; or r4, r0, r1 }

	/* Check for an empty size. */
	{ bz r2, .Ldone; andi r4, r4, 3 }

	/* Save aside original values in case of a fault. */
	{ move r24, r1; move r25, r2 }
	move r27, lr

	/* Check for an unaligned source or dest. */
	{ bnz r4, .Lcopy_unaligned_maybe_many; addli r4, r2, -256 }

.Lcheck_aligned_copy_size:
	/* If we are copying < 256 bytes, branch to simple case. */
	{ blzt r4, .Lcopy_8_check; slti_u r8, r2, 8 }

	/* Copying >= 256 bytes, so jump to complex prefetching loop. */
	{ andi r6, r1, 63; j .Lcopy_many }

/*
 *
 * Aligned 4 byte at a time copy loop
 *
 */

.Lcopy_8_loop:
	/* Copy two words at a time to hide load latency. */
EX:	{ lw r3, r1; addi r1, r1, 4; slti_u r8, r2, 16 }
EX:	{ lw r4, r1; addi r1, r1, 4 }
EX:	{ sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
EX:	{ sw r0, r4; addi r0, r0, 4; addi r2, r2, -4 }
.Lcopy_8_check:
	{ bzt r8, .Lcopy_8_loop; slti_u r4, r2, 4 }

	/* Copy odd leftover word, if any. */
	{ bnzt r4, .Lcheck_odd_stragglers }
EX:	{ lw r3, r1; addi r1, r1, 4 }
EX:	{ sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }

.Lcheck_odd_stragglers:
	{ bnz r2, .Lcopy_unaligned_few }

.Ldone:
	/* For memcpy return original dest address, else zero. */
	{ mz r0, r29, r23; jrp lr }


/*
 *
 * Prefetching multiple cache line copy handler (for large transfers).
 *
 */

	/* Copy words until r1 is cache-line-aligned. */
.Lalign_loop:
EX:	{ lw r3, r1; addi r1, r1, 4 }
	{ andi r6, r1, 63 }
EX:	{ sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
.Lcopy_many:
	{ bnzt r6, .Lalign_loop; addi r9, r0, 63 }

	{ addi r3, r1, 60; andi r9, r9, -64 }

#ifdef MEMCPY_USE_WH64
	/* No need to prefetch dst, we'll just do the wh64
	 * right before we copy a line.
	 */
#endif

	/* Prime r5/r6/r7 with the last word of source lines 0/1/2. */
EX:	{ lw r5, r3; addi r3, r3, 64; movei r4, 1 }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, .; move r27, lr }
EX:	{ lw r6, r3; addi r3, r3, 64 }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, . }
EX:	{ lw r7, r3; addi r3, r3, 64 }
#ifndef MEMCPY_USE_WH64
	/* Prefetch the dest */
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, . }
	/* Use a real load to cause a TLB miss if necessary. We aren't using
	 * r28, so this should be fine.
	 */
EX:	{ lw r28, r9; addi r9, r9, 64 }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, . }
	{ prefetch r9; addi r9, r9, 64 }
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bnzt zero, . }
	{ prefetch r9; addi r9, r9, 64 }
#endif
	/* Intentionally stall for a few cycles to leave L2 cache alone. */
	{ bz zero, .Lbig_loop2 }

	/* On entry to this loop:
	 * - r0 points to the start of dst line 0
	 * - r1 points to start of src line 0
	 * - r2 >= (256 - 60), only the first time the loop trips.
	 * - r3 contains r1 + 128 + 60    [pointer to end of source line 2]
	 *   This is our prefetch address. When we get near the end
	 *   rather than prefetching off the end this is changed to point
	 *   to some "safe" recently loaded address.
	 * - r5 contains *(r1 + 60)       [i.e. last word of source line 0]
	 * - r6 contains *(r1 + 64 + 60)  [i.e. last word of source line 1]
	 * - r9 contains ((r0 + 63) & -64)
	 *   [start of next dst cache line.]
	 */

.Lbig_loop:
	{ jal .Lcopy_line2; add r15, r1, r2 }

.Lbig_loop2:
	/* Copy line 0, first stalling until r5 is ready. */
EX:	{ move r12, r5; lw r16, r1 }
	{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
	/* Prefetch several lines ahead. */
EX:	{ lw r5, r3; addi r3, r3, 64 }
	{ jal .Lcopy_line }

	/* Copy line 1, first stalling until r6 is ready. */
EX:	{ move r12, r6; lw r16, r1 }
	{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
	/* Prefetch several lines ahead. */
EX:	{ lw r6, r3; addi r3, r3, 64 }
	{ jal .Lcopy_line }

	/* Copy line 2, first stalling until r7 is ready. */
EX:	{ move r12, r7; lw r16, r1 }
	{ bz r4, .Lcopy_8_check; slti_u r8, r2, 8 }
	/* Prefetch several lines ahead. */
EX:	{ lw r7, r3; addi r3, r3, 64 }
	/* Use up a caches-busy cycle by jumping back to the top of the
	 * loop. Might as well get it out of the way now.
	 */
	{ j .Lbig_loop }


	/* On entry:
	 * - r0 points to the destination line.
	 * - r1 points to the source line.
	 * - r3 is the next prefetch address.
	 * - r9 holds the last address used for wh64.
	 * - r12 = WORD_15
	 * - r16 = WORD_0.
	 * - r17 == r1 + 16.
	 * - r27 holds saved lr to restore.
	 *
	 * On exit:
	 * - r0 is incremented by 64.
	 * - r1 is incremented by 64, unless that would point to a word
	 *   beyond the end of the source array, in which case it is redirected
	 *   to point to an arbitrary word already in the cache.
	 * - r2 is decremented by 64.
	 * - r3 is unchanged, unless it points to a word beyond the
	 *   end of the source array, in which case it is redirected
	 *   to point to an arbitrary word already in the cache.
	 *   Redirecting is OK since if we are that close to the end
	 *   of the array we will not come back to this subroutine
	 *   and use the contents of the prefetched address.
	 * - r4 is nonzero iff r2 >= 64.
	 * - r9 is incremented by 64, unless it points beyond the
	 *   end of the last full destination cache line, in which
	 *   case it is redirected to a "safe address" that can be
	 *   clobbered (sp - 64)
	 * - lr contains the value in r27.
	 */

/* r26 unused */

.Lcopy_line:
	/* TODO: when r3 goes past the end, we would like to redirect it
	 * to prefetch the last partial cache line (if any) just once, for the
	 * benefit of the final cleanup loop. But we don't want to
	 * prefetch that line more than once, or subsequent prefetches
	 * will go into the RTF. But then .Lbig_loop should unconditionally
	 * branch to top of loop to execute final prefetch, and its
	 * nop should become a conditional branch.
	 */

	/* We need two non-memory cycles here to cover the resources
	 * used by the loads initiated by the caller.
	 */
	{ add r15, r1, r2 }
.Lcopy_line2:
	{ slt_u r13, r3, r15; addi r17, r1, 16 }

	/* NOTE: this will stall for one cycle as L1 is busy. */

	/* Fill second L1D line. */
EX:	{ lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */

#ifdef MEMCPY_TEST_WH64
	/* Issue a fake wh64 that clobbers the destination words
	 * with random garbage, for testing.
	 */
	{ movei r19, 64; crc32_32 r10, r2, r9 }
.Lwh64_test_loop:
EX:	{ sw r9, r10; addi r9, r9, 4; addi r19, r19, -4 }
	{ bnzt r19, .Lwh64_test_loop; crc32_32 r10, r10, r19 }
#elif CHIP_HAS_WH64()
	/* Prepare destination line for writing. */
EX:	{ wh64 r9; addi r9, r9, 64 }
#else
	/* Prefetch dest line */
	{ prefetch r9; addi r9, r9, 64 }
#endif
	/* Load seven words that are L1D hits to cover wh64 L2 usage. */

	/* Load the three remaining words from the last L1D line, which
	 * we know has already filled the L1D.
	 */
EX:	{ lw r4, r1;  addi r1, r1, 4;   addi r20, r1, 16 } /* r4 = WORD_12 */
EX:	{ lw r8, r1;  addi r1, r1, 4;   slt_u r13, r20, r15 }/* r8 = WORD_13 */
EX:	{ lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 } /* r11 = WORD_14 */

	/* Load the three remaining words from the first L1D line, first
	 * stalling until it has filled by "looking at" r16.
	 */
EX:	{ lw r13, r1; addi r1, r1, 4; move zero, r16 } /* r13 = WORD_1 */
EX:	{ lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_2 */
EX:	{ lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 } /* r15 = WORD_3 */

	/* Load second word from the second L1D line, first
	 * stalling until it has filled by "looking at" r17.
	 */
EX:	{ lw r19, r1; addi r1, r1, 4; move zero, r17 } /* r19 = WORD_5 */

	/* Store last word to the destination line, potentially dirtying it
	 * for the first time, which keeps the L2 busy for two cycles.
	 */
EX:	{ sw r10, r12 } /* store(WORD_15) */

	/* Use two L1D hits to cover the sw L2 access above. */
EX:	{ lw r10, r1; addi r1, r1, 4 } /* r10 = WORD_6 */
EX:	{ lw r12, r1; addi r1, r1, 4 } /* r12 = WORD_7 */

	/* Fill third L1D line. */
EX:	{ lw r18, r1; addi r1, r1, 4 } /* r18 = WORD_8 */

	/* Store first L1D line. */
EX:	{ sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
EX:	{ sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
EX:	{ sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
#ifdef MEMCPY_USE_WH64
EX:	{ sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
#else
	/* Back up the r9 to a cache line we are already storing to
	 * if it gets past the end of the dest vector.  Strictly speaking,
	 * we don't need to back up to the start of a cache line, but it's free
	 * and tidy, so why not?
	 */
EX:	{ sw r0, r15; addi r0, r0, 4; andi r13, r0, -64 } /* store(WORD_3) */
#endif
	/* Store second L1D line. */
EX:	{ sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */
EX:	{ sw r0, r19; addi r0, r0, 4 } /* store(WORD_5) */
EX:	{ sw r0, r10; addi r0, r0, 4 } /* store(WORD_6) */
EX:	{ sw r0, r12; addi r0, r0, 4 } /* store(WORD_7) */

	/* Fill fourth L1D line (stalling on r18 via the fake move). */
EX:	{ lw r13, r1; addi r1, r1, 4; move zero, r18 } /* r13 = WORD_9 */
EX:	{ lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_10 */
EX:	{ lw r15, r1; move r1, r20 } /* r15 = WORD_11 */

	/* Store third L1D line. */
EX:	{ sw r0, r18; addi r0, r0, 4 } /* store(WORD_8) */
EX:	{ sw r0, r13; addi r0, r0, 4 } /* store(WORD_9) */
EX:	{ sw r0, r14; addi r0, r0, 4 } /* store(WORD_10) */
EX:	{ sw r0, r15; addi r0, r0, 4 } /* store(WORD_11) */

	/* Store rest of fourth L1D line. */
EX:	{ sw r0, r4;  addi r0, r0, 4 } /* store(WORD_12) */
	{
EX:	sw r0, r8 /* store(WORD_13) */
	addi r0, r0, 4
	/* Will r2 be > 64 after we subtract 64 below? */
	shri r4, r2, 7
	}
	{
EX:	sw r0, r11 /* store(WORD_14) */
	addi r0, r0, 8
	/* Record 64 bytes successfully copied. */
	addi r2, r2, -64
	}

	{ jrp lr; move lr, r27 }

	/* Convey to the backtrace library that the stack frame is size
	 * zero, and the real return address is on the stack rather than
	 * in 'lr'.
	 */
	{ info 8 }

	.align 64
.Lcopy_unaligned_maybe_many:
	/* Skip the setup overhead if we aren't copying many bytes. */
	{ slti_u r8, r2, 20; sub r4, zero, r0 }
	{ bnzt r8, .Lcopy_unaligned_few; andi r4, r4, 3 }
	{ bz r4, .Ldest_is_word_aligned; add r18, r1, r2 }

/*
 *
 * unaligned 4 byte at a time copy handler.
 *
 */

	/* Copy single bytes until r0 == 0 mod 4, so we can store words. */
.Lalign_dest_loop:
EX:	{ lb_u r3, r1; addi r1, r1, 1; addi r4, r4, -1 }
EX:	{ sb r0, r3;   addi r0, r0, 1; addi r2, r2, -1 }
	{ bnzt r4, .Lalign_dest_loop; andi r3, r1, 3 }

	/* If source and dest are now *both* aligned, do an aligned copy. */
	{ bz r3, .Lcheck_aligned_copy_size; addli r4, r2, -256 }

.Ldest_is_word_aligned:

#if CHIP_HAS_DWORD_ALIGN()
EX:	{ andi r8, r0, 63; lwadd_na r6, r1, 4}
	{ slti_u r9, r2, 64; bz r8, .Ldest_is_L2_line_aligned }

	/* This copies unaligned words until either there are fewer
	 * than 4 bytes left to copy, or until the destination pointer
	 * is cache-aligned, whichever comes first.
	 *
	 * On entry:
	 * - r0 is the next store address.
	 * - r1 points 4 bytes past the load address corresponding to r0.
	 * - r2 >= 4
	 * - r6 is the next aligned word loaded.
	 */
.Lcopy_unaligned_src_words:
EX:	{ lwadd_na r7, r1, 4; slti_u r8, r2, 4 + 4 }
	/* stall */
	{ dword_align r6, r7, r1; slti_u r9, r2, 64 + 4 }
EX:	{ swadd r0, r6, 4; addi r2, r2, -4 }
	{ bnz r8, .Lcleanup_unaligned_words; andi r8, r0, 63 }
	{ bnzt r8, .Lcopy_unaligned_src_words; move r6, r7 }

	/* On entry:
	 * - r0 is the next store address.
	 * - r1 points 4 bytes past the load address corresponding to r0.
	 * - r2 >= 4 (# of bytes left to store).
	 * - r6 is the next aligned src word value.
	 * - r9 = (r2 < 64U).
	 * - r18 points one byte past the end of source memory.
	 */
.Ldest_is_L2_line_aligned:

	{
	/* Not a full cache line remains. */
	bnz r9, .Lcleanup_unaligned_words
	move r7, r6
	}

	/* r2 >= 64 */

	/* Kick off two prefetches, but don't go past the end. */
	{ addi r3, r1, 63 - 4; addi r8, r1, 64 + 63 - 4 }
	{ prefetch r3; move r3, r8; slt_u r8, r8, r18 }
	{ mvz r3, r8, r1; addi r8, r3, 64 }
	{ prefetch r3; move r3, r8; slt_u r8, r8, r18 }
	{ mvz r3, r8, r1; movei r17, 0 }

.Lcopy_unaligned_line:
	/* Prefetch another line. */
	{ prefetch r3; addi r15, r1, 60; addi r3, r3, 64 }
	/* Fire off a load of the last word we are about to copy. */
EX:	{ lw_na r15, r15; slt_u r8, r3, r18 }

EX:	{ mvz r3, r8, r1; wh64 r0 }

	/* This loop runs twice.
	 *
	 * On entry:
	 * - r17 is even before the first iteration, and odd before
	 *   the second.  It is incremented inside the loop.  Encountering
	 *   an even value at the end of the loop makes it stop.
	 */
.Lcopy_half_an_unaligned_line:
EX:	{
	/* Stall until the last byte is ready. In the steady state this
	 * guarantees all words to load below will be in the L2 cache, which
	 * avoids shunting the loads to the RTF.
	 */
	move zero, r15
	lwadd_na r7, r1, 16
	}
EX:	{ lwadd_na r11, r1, 12 }
EX:	{ lwadd_na r14, r1, -24 }
EX:	{ lwadd_na r8, r1, 4 }
EX:	{ lwadd_na r9, r1, 4 }
EX:	{
	lwadd_na r10, r1, 8
	/* r16 = (r2 < 64), after we subtract 32 from r2 below. */
	slti_u r16, r2, 64 + 32
	}
EX:	{ lwadd_na r12, r1, 4; addi r17, r17, 1 }
EX:	{ lwadd_na r13, r1, 8; dword_align r6, r7, r1 }
EX:	{ swadd r0, r6, 4; dword_align r7, r8, r1 }
EX:	{ swadd r0, r7, 4; dword_align r8, r9, r1 }
EX:	{ swadd r0, r8, 4; dword_align r9, r10, r1 }
EX:	{ swadd r0, r9, 4; dword_align r10, r11, r1 }
EX:	{ swadd r0, r10, 4; dword_align r11, r12, r1 }
EX:	{ swadd r0, r11, 4; dword_align r12, r13, r1 }
EX:	{ swadd r0, r12, 4; dword_align r13, r14, r1 }
EX:	{ swadd r0, r13, 4; addi r2, r2, -32 }
	{ move r6, r14; bbst r17, .Lcopy_half_an_unaligned_line }

	{ bzt r16, .Lcopy_unaligned_line; move r7, r6 }

	/* On entry:
	 * - r0 is the next store address.
	 * - r1 points 4 bytes past the load address corresponding to r0.
	 * - r2 >= 0 (# of bytes left to store).
	 * - r7 is the next aligned src word value.
	 */
.Lcleanup_unaligned_words:
	/* Handle any trailing bytes. */
	{ bz r2, .Lcopy_unaligned_done; slti_u r8, r2, 4 }
	{ bzt r8, .Lcopy_unaligned_src_words; move r6, r7 }

	/* Move r1 back to the point where it corresponds to r0. */
	{ addi r1, r1, -4 }

#else /* !CHIP_HAS_DWORD_ALIGN() */

	/* Compute right/left shift counts and load initial source words. */
	{ andi r5, r1, -4; andi r3, r1, 3 }
EX:	{ lw r6, r5; addi r5, r5, 4; shli r3, r3, 3 }
EX:	{ lw r7, r5; addi r5, r5, 4; sub r4, zero, r3 }

	/* Load and store one word at a time, using shifts and ORs
	 * to correct for the misaligned src.
	 */
.Lcopy_unaligned_src_loop:
	{ shr r6, r6, r3; shl r8, r7, r4 }
EX:	{ lw r7, r5; or r8, r8, r6; move r6, r7 }
EX:	{ sw r0, r8; addi r0, r0, 4; addi r2, r2, -4 }
	{ addi r5, r5, 4; slti_u r8, r2, 8 }
	{ bzt r8, .Lcopy_unaligned_src_loop; addi r1, r1, 4 }

	{ bz r2, .Lcopy_unaligned_done }
#endif /* !CHIP_HAS_DWORD_ALIGN() */

	/* Fall through */

/*
 *
 * 1 byte at a time copy handler.
 *
 */

.Lcopy_unaligned_few:
EX:	{ lb_u r3, r1; addi r1, r1, 1 }
EX:	{ sb r0, r3;   addi r0, r0, 1; addi r2, r2, -1 }
	{ bnzt r2, .Lcopy_unaligned_few }

.Lcopy_unaligned_done:

	/* For memcpy return original dest address, else zero. */
	{ mz r0, r29, r23; jrp lr }

.Lend_memcpy_common:
	.size memcpy_common, .Lend_memcpy_common - memcpy_common
564 | |||
	.section .fixup,"ax"
memcpy_common_fixup:
	.type memcpy_common_fixup, @function

	/* Skip any bytes we already successfully copied.
	 * r2 (num remaining) is correct, but r0 (dst) and r1 (src)
	 * may not be quite right because of unrolling and prefetching.
	 * So we need to recompute their values as the address just
	 * after the last byte we are sure was successfully loaded and
	 * then stored.
	 */

	/* Determine how many bytes we successfully copied. */
	{ sub r3, r25, r2 }

	/* Add this to the original r0 and r1 to get their new values. */
	{ add r0, r23, r3; add r1, r24, r3 }

	/* Dispatch on the IS_* tag in r29: zero = memcpy,
	 * negative = copy_to_user, positive = copy_from_user variants.
	 */
	{ bzt r29, memcpy_fixup_loop }
	{ blzt r29, copy_to_user_fixup_loop }

copy_from_user_fixup_loop:
	/* Try copying the rest one byte at a time, expecting a load fault. */
.Lcfu:	{ lb_u r3, r1; addi r1, r1, 1 }
	{ sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
	{ bnzt r2, copy_from_user_fixup_loop }

.Lcopy_from_user_fixup_zero_remainder:
	{ bbs r29, 2f }  /* low bit set means IS_COPY_FROM_USER */
	/* byte-at-a-time loop faulted, so zero the rest. */
	{ move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ }
1:	{ sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 }
	{ bnzt r3, 1b }
2:	move lr, r27
	{ move r0, r2; jrp lr }

copy_to_user_fixup_loop:
	/* Try copying the rest one byte at a time, expecting a store fault. */
	{ lb_u r3, r1; addi r1, r1, 1 }
.Lctu:	{ sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
	{ bnzt r2, copy_to_user_fixup_loop }
.Lcopy_to_user_fixup_done:
	move lr, r27
	{ move r0, r2; jrp lr }

memcpy_fixup_loop:
	/* Try copying the rest one byte at a time. We expect a disastrous
	 * fault to happen since we are in fixup code, but let it happen.
	 */
	{ lb_u r3, r1; addi r1, r1, 1 }
	{ sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
	{ bnzt r2, memcpy_fixup_loop }
	/* This should be unreachable, we should have faulted again.
	 * But be paranoid and handle it in case some interrupt changed
	 * the TLB or something.
	 */
	move lr, r27
	{ move r0, r23; jrp lr }

	.size memcpy_common_fixup, . - memcpy_common_fixup

	/* Extra exception-table entries so faults inside the fixup loops
	 * themselves are recoverable.
	 */
	.section __ex_table,"a"
	.word .Lcfu, .Lcopy_from_user_fixup_zero_remainder
	.word .Lctu, .Lcopy_to_user_fixup_done
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c new file mode 100644 index 000000000000..dfedea7b266b --- /dev/null +++ b/arch/tile/lib/memcpy_tile64.c | |||
@@ -0,0 +1,271 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/string.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | #include <asm/fixmap.h> | ||
20 | #include <asm/kmap_types.h> | ||
21 | #include <asm/tlbflush.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | #include <arch/chip.h> | ||
24 | |||
25 | |||
26 | #if !CHIP_HAS_COHERENT_LOCAL_CACHE() | ||
27 | |||
28 | /* Defined in memcpy.S */ | ||
29 | extern unsigned long __memcpy_asm(void *to, const void *from, unsigned long n); | ||
30 | extern unsigned long __copy_to_user_inatomic_asm( | ||
31 | void __user *to, const void *from, unsigned long n); | ||
32 | extern unsigned long __copy_from_user_inatomic_asm( | ||
33 | void *to, const void __user *from, unsigned long n); | ||
34 | extern unsigned long __copy_from_user_zeroing_asm( | ||
35 | void *to, const void __user *from, unsigned long n); | ||
36 | |||
37 | typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long); | ||
38 | |||
39 | /* Size above which to consider TLB games for performance */ | ||
40 | #define LARGE_COPY_CUTOFF 2048 | ||
41 | |||
42 | /* Communicate to the simulator what we are trying to do. */ | ||
43 | #define sim_allow_multiple_caching(b) \ | ||
44 | __insn_mtspr(SPR_SIM_CONTROL, \ | ||
45 | SIM_CONTROL_ALLOW_MULTIPLE_CACHING | ((b) << _SIM_CONTROL_OPERATOR_BITS)) | ||
46 | |||
/*
 * Copy memory by briefly enabling incoherent cacheline-at-a-time mode.
 *
 * We set up our own source and destination PTEs that we fully control.
 * This is the only way to guarantee that we don't race with another
 * thread that is modifying the PTE; we can't afford to try the
 * copy_{to,from}_user() technique of catching the interrupt, since
 * we must run with interrupts disabled to avoid the risk of some
 * other code seeing the incoherent data in our cache.  (Recall that
 * our cache is indexed by PA, so even if the other code doesn't use
 * our KM_MEMCPY virtual addresses, they'll still hit in cache using
 * the normal VAs that aren't supposed to hit in cache.)
 *
 * @dest/@source are the kernel VAs to copy between; @dst_pte/@src_pte
 * are already-validated PTEs for the pages they fall in (the caller
 * has downgraded huge-page PTEs to small-page PTEs), and @len is
 * clipped by the caller so neither buffer crosses a page boundary.
 */
static void memcpy_multicache(void *dest, const void *source,
			      pte_t dst_pte, pte_t src_pte, int len)
{
	int idx;
	unsigned long flags, newsrc, newdst;
	pmd_t *pmdp;
	pte_t *ptep;
	int cpu = get_cpu();	/* pins us to this cpu's fixmap slots */

	/*
	 * Disable interrupts so that we don't recurse into memcpy()
	 * in an interrupt handler, nor accidentally reference
	 * the PA of the source from an interrupt routine.  Also
	 * notify the simulator that we're playing games so we don't
	 * generate spurious coherency warnings.
	 */
	local_irq_save(flags);
	sim_allow_multiple_caching(1);

	/* Set up the new dest mapping in this cpu's KM_MEMCPY0 fixmap slot. */
	idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0;
	newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
	pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
	ptep = pte_offset_kernel(pmdp, newdst);
	if (pte_val(*ptep) != pte_val(dst_pte)) {
		set_pte(ptep, dst_pte);
		local_flush_tlb_page(NULL, newdst, PAGE_SIZE);
	}

	/* Set up the new source mapping (read-only, no-cache variant). */
	idx += (KM_MEMCPY0 - KM_MEMCPY1);
	src_pte = hv_pte_set_nc(src_pte);
	src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */
	newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
	pmdp = pmd_offset(pud_offset(pgd_offset_k(newsrc), newsrc), newsrc);
	ptep = pte_offset_kernel(pmdp, newsrc);
	*ptep = src_pte;   /* set_pte() would be confused by this */
	local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);

	/* Actually move the data. */
	__memcpy_asm((void *)newdst, (const void *)newsrc, len);

	/*
	 * Remap the source as locally-cached and not OLOC'ed so that
	 * we can inval without also invaling the remote cpu's cache.
	 * This also avoids known errata with inv'ing cacheable oloc data.
	 */
	src_pte = hv_pte_set_mode(src_pte, HV_PTE_MODE_CACHE_NO_L3);
	src_pte = hv_pte_set_writable(src_pte); /* need write access for inv */
	*ptep = src_pte;   /* set_pte() would be confused by this */
	local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);

	/*
	 * Do the actual invalidation, covering the full L2 cache line
	 * at the end since __memcpy_asm() is somewhat aggressive.
	 */
	__inv_buffer((void *)newsrc, len);

	/*
	 * We're done: notify the simulator that all is back to normal,
	 * and re-enable interrupts and pre-emption.
	 */
	sim_allow_multiple_caching(0);
	local_irq_restore(flags);
	put_cpu();
}
126 | |||
/*
 * Identify large copies from remotely-cached memory, and copy them
 * via memcpy_multicache() if they look good, otherwise fall back
 * to the particular kind of copying passed as the memcpy_t function.
 *
 * Returns whatever @func returns for the (possibly empty) tail of
 * the copy that was not handled by memcpy_multicache(); the pointers
 * and @len are advanced past each multicache chunk before the final
 * call to @func.
 */
static unsigned long fast_copy(void *dest, const void *source, int len,
			       memcpy_t func)
{
	/*
	 * Check if it's big enough to bother with.  We may end up doing a
	 * small copy via TLB manipulation if we're near a page boundary,
	 * but presumably we'll make it up when we hit the second page.
	 */
	while (len >= LARGE_COPY_CUTOFF) {
		int copy_size, bytes_left_on_page;
		pte_t *src_ptep, *dst_ptep;
		pte_t src_pte, dst_pte;
		struct page *src_page, *dst_page;

		/* Is the source page oloc'ed to a remote cpu? */
retry_source:
		src_ptep = virt_to_pte(current->mm, (unsigned long)source);
		if (src_ptep == NULL)
			break;
		src_pte = *src_ptep;
		if (!hv_pte_get_present(src_pte) ||
		    !hv_pte_get_readable(src_pte) ||
		    hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
			break;
		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
			break;
		/*
		 * Pin the page, then re-read the PTE; if it changed under
		 * us the snapshot may describe a stale page, so retry.
		 */
		src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
		get_page(src_page);
		if (pte_val(src_pte) != pte_val(*src_ptep)) {
			put_page(src_page);
			goto retry_source;
		}
		if (pte_huge(src_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = hv_pte_get_pfn(src_pte);
			pfn += (((unsigned long)source & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			src_pte = pfn_pte(pfn, src_pte);
			src_pte = pte_mksmall(src_pte);
		}

		/* Is the destination page writable? */
retry_dest:
		dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
		if (dst_ptep == NULL) {
			put_page(src_page);
			break;
		}
		dst_pte = *dst_ptep;
		if (!hv_pte_get_present(dst_pte) ||
		    !hv_pte_get_writable(dst_pte)) {
			put_page(src_page);
			break;
		}
		dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
		if (dst_page == src_page) {
			/*
			 * Source and dest are on the same page; this
			 * potentially exposes us to incoherence if any
			 * part of src and dest overlap on a cache line.
			 * Just give up rather than trying to be precise.
			 */
			put_page(src_page);
			break;
		}
		/* Same pin-then-recheck dance as for the source page. */
		get_page(dst_page);
		if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
			put_page(dst_page);
			goto retry_dest;
		}
		if (pte_huge(dst_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = hv_pte_get_pfn(dst_pte);
			pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			dst_pte = pfn_pte(pfn, dst_pte);
			dst_pte = pte_mksmall(dst_pte);
		}

		/* All looks good: create a cachable PTE and copy from it */
		copy_size = len;
		bytes_left_on_page =
			PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		bytes_left_on_page =
			PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);

		/* Release the pages */
		put_page(dst_page);
		put_page(src_page);

		/* Continue on the next page */
		dest += copy_size;
		source += copy_size;
		len -= copy_size;
	}

	/* Hand any remaining tail to the ordinary copy routine. */
	return func(dest, source, len);
}
235 | |||
236 | void *memcpy(void *to, const void *from, __kernel_size_t n) | ||
237 | { | ||
238 | if (n < LARGE_COPY_CUTOFF) | ||
239 | return (void *)__memcpy_asm(to, from, n); | ||
240 | else | ||
241 | return (void *)fast_copy(to, from, n, __memcpy_asm); | ||
242 | } | ||
243 | |||
244 | unsigned long __copy_to_user_inatomic(void __user *to, const void *from, | ||
245 | unsigned long n) | ||
246 | { | ||
247 | if (n < LARGE_COPY_CUTOFF) | ||
248 | return __copy_to_user_inatomic_asm(to, from, n); | ||
249 | else | ||
250 | return fast_copy(to, from, n, __copy_to_user_inatomic_asm); | ||
251 | } | ||
252 | |||
253 | unsigned long __copy_from_user_inatomic(void *to, const void __user *from, | ||
254 | unsigned long n) | ||
255 | { | ||
256 | if (n < LARGE_COPY_CUTOFF) | ||
257 | return __copy_from_user_inatomic_asm(to, from, n); | ||
258 | else | ||
259 | return fast_copy(to, from, n, __copy_from_user_inatomic_asm); | ||
260 | } | ||
261 | |||
262 | unsigned long __copy_from_user_zeroing(void *to, const void __user *from, | ||
263 | unsigned long n) | ||
264 | { | ||
265 | if (n < LARGE_COPY_CUTOFF) | ||
266 | return __copy_from_user_zeroing_asm(to, from, n); | ||
267 | else | ||
268 | return fast_copy(to, from, n, __copy_from_user_zeroing_asm); | ||
269 | } | ||
270 | |||
271 | #endif /* !CHIP_HAS_COHERENT_LOCAL_CACHE() */ | ||
diff --git a/arch/tile/lib/memmove_32.c b/arch/tile/lib/memmove_32.c new file mode 100644 index 000000000000..fd615ae6ade7 --- /dev/null +++ b/arch/tile/lib/memmove_32.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
/*
 * memmove(): copy @n bytes from @src to @dest, handling overlap.
 *
 * Disjoint buffers are handed to memcpy(), which does all the heavy
 * lifting (prefetching, word-at-a-time copies, etc.).  Overlapping
 * buffers are copied byte-by-byte, backwards when @dest sits above
 * @src and forwards otherwise, so bytes are always read before they
 * are overwritten.  Returns @dest.
 */
void *memmove(void *dest, const void *src, size_t n)
{
	const uint8_t *in = src;
	uint8_t *out = dest;

	/* No overlap: defer to memcpy. */
	if (in + n <= out || out + n <= in)
		return memcpy(dest, src, n);

	if (n != 0) {
		if (out > in) {
			/* dest overlaps the tail of src: copy backwards. */
			size_t i = n;
			while (i-- != 0)
				out[i] = in[i];
		} else {
			/* dest overlaps the head of src: copy forwards. */
			size_t i;
			for (i = 0; i != n; i++)
				out[i] = in[i];
		}
	}

	return dest;
}
EXPORT_SYMBOL(memmove);
diff --git a/arch/tile/lib/memset_32.c b/arch/tile/lib/memset_32.c new file mode 100644 index 000000000000..bfde5d864df1 --- /dev/null +++ b/arch/tile/lib/memset_32.c | |||
@@ -0,0 +1,275 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <arch/chip.h> | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/module.h> | ||
20 | |||
21 | |||
/*
 * memset() for TILE: fill buffer @s with byte @c for @n bytes.
 * Returns @s, like the standard memset().
 *
 * Small requests (n < BYTE_CUTOFF) use a plain byte loop.  Larger
 * requests align the pointer and residual count to 4-byte boundaries,
 * replicate the fill byte across a 32-bit word with the intlb/intlh
 * byte-interleave instructions, and then store a word at a time.
 * Chips without wh64 prefetch several lines ahead to hide store-miss
 * latency; chips with wh64 issue that instruction once per cache line
 * before filling it (NOTE(review): assumed to pre-allocate the line
 * without fetching it from memory -- confirm against the TILE ISA).
 */
void *memset(void *s, int c, size_t n)
{
	uint32_t *out32;
	int n32;
	uint32_t v16, v32;
	uint8_t *out8 = s;
#if !CHIP_HAS_WH64()
	int ahead32;
#else
	int to_align32;
#endif

	/* Experimentation shows that a trivial tight loop is a win up until
	 * around a size of 20, where writing a word at a time starts to win.
	 */
#define BYTE_CUTOFF 20

#if BYTE_CUTOFF < 3
	/* This must be at least this big, or some code later
	 * on doesn't work.
	 */
#error "BYTE_CUTOFF is too small"
#endif

	if (n < BYTE_CUTOFF) {
		/* Strangely, this turns out to be the tightest way to
		 * write this loop.
		 */
		if (n != 0) {
			do {
				/* Strangely, combining these into one line
				 * performs worse.
				 */
				*out8 = c;
				out8++;
			} while (--n != 0);
		}

		return s;
	}

#if !CHIP_HAS_WH64()
	/* Use a spare issue slot to start prefetching the first cache
	 * line early. This instruction is free as the store can be buried
	 * in otherwise idle issue slots doing ALU ops.
	 */
	__insn_prefetch(out8);

	/* We prefetch the end so that a short memset that spans two cache
	 * lines gets some prefetching benefit. Again we believe this is free
	 * to issue.
	 */
	__insn_prefetch(&out8[n - 1]);
#endif /* !CHIP_HAS_WH64() */


	/* Align 'out8'. We know n >= 3 so this won't write past the end. */
	while (((uintptr_t) out8 & 3) != 0) {
		*out8++ = c;
		--n;
	}

	/* Align 'n'.  The tail bytes are stored here, up front, so the
	 * word loop below only ever handles whole words.
	 */
	while (n & 3)
		out8[--n] = c;

	out32 = (uint32_t *) out8;
	n32 = n >> 2;

	/* Tile input byte out to 32 bits. */
	v16 = __insn_intlb(c, c);
	v32 = __insn_intlh(v16, v16);

	/* This must be at least 8 or the following loop doesn't work. */
#define CACHE_LINE_SIZE_IN_WORDS (CHIP_L2_LINE_SIZE() / 4)

#if !CHIP_HAS_WH64()

	/* Prefetch distance (in words) maintained by the loop below. */
	ahead32 = CACHE_LINE_SIZE_IN_WORDS;

	/* We already prefetched the first and last cache lines, so
	 * we only need to do more prefetching if we are storing
	 * to more than two cache lines.
	 */
	if (n32 > CACHE_LINE_SIZE_IN_WORDS * 2) {
		int i;

		/* Prefetch the next several cache lines.
		 * This is the setup code for the software-pipelined
		 * loop below.
		 */
#define MAX_PREFETCH 5
		ahead32 = n32 & -CACHE_LINE_SIZE_IN_WORDS;
		if (ahead32 > MAX_PREFETCH * CACHE_LINE_SIZE_IN_WORDS)
			ahead32 = MAX_PREFETCH * CACHE_LINE_SIZE_IN_WORDS;

		for (i = CACHE_LINE_SIZE_IN_WORDS;
		     i < ahead32; i += CACHE_LINE_SIZE_IN_WORDS)
			__insn_prefetch(&out32[i]);
	}

	if (n32 > ahead32) {
		while (1) {
			int j;

			/* Prefetch by reading one word several cache lines
			 * ahead.  Since loads are non-blocking this will
			 * cause the full cache line to be read while we are
			 * finishing earlier cache lines.  Using a store
			 * here causes microarchitectural performance
			 * problems where a victimizing store miss goes to
			 * the head of the retry FIFO and locks the pipe for
			 * a few cycles.  So a few subsequent stores in this
			 * loop go into the retry FIFO, and then later
			 * stores see other stores to the same cache line
			 * are already in the retry FIFO and themselves go
			 * into the retry FIFO, filling it up and grinding
			 * to a halt waiting for the original miss to be
			 * satisfied.
			 */
			__insn_prefetch(&out32[ahead32]);

#if 1
#if CACHE_LINE_SIZE_IN_WORDS % 4 != 0
#error "Unhandled CACHE_LINE_SIZE_IN_WORDS"
#endif

			n32 -= CACHE_LINE_SIZE_IN_WORDS;

			/* Save icache space by only partially unrolling
			 * this loop.
			 */
			for (j = CACHE_LINE_SIZE_IN_WORDS / 4; j > 0; j--) {
				*out32++ = v32;
				*out32++ = v32;
				*out32++ = v32;
				*out32++ = v32;
			}
#else
			/* Unfortunately, due to a code generator flaw this
			 * allocates a separate register for each of these
			 * stores, which requires a large number of spills,
			 * which makes this procedure enormously bigger
			 * (something like 70%)
			 */
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			*out32++ = v32;
			n32 -= 16;
#endif

			/* To save compiled code size, reuse this loop even
			 * when we run out of prefetching to do by dropping
			 * ahead32 down.
			 */
			if (n32 <= ahead32) {
				/* Not even a full cache line left,
				 * so stop now.
				 */
				if (n32 < CACHE_LINE_SIZE_IN_WORDS)
					break;

				/* Choose a small enough value that we don't
				 * prefetch past the end.  There's no sense
				 * in touching cache lines we don't have to.
				 */
				ahead32 = CACHE_LINE_SIZE_IN_WORDS - 1;
			}
		}
	}

#else /* CHIP_HAS_WH64() */

	/* Determine how many words we need to emit before the 'out32'
	 * pointer becomes aligned modulo the cache line size.
	 */
	to_align32 =
		(-((uintptr_t)out32 >> 2)) & (CACHE_LINE_SIZE_IN_WORDS - 1);

	/* Only bother aligning and using wh64 if there is at least
	 * one full cache line to process.  This check also prevents
	 * overrunning the end of the buffer with alignment words.
	 */
	if (to_align32 <= n32 - CACHE_LINE_SIZE_IN_WORDS) {
		int lines_left;

		/* Align out32 mod the cache line size so we can use wh64. */
		n32 -= to_align32;
		for (; to_align32 != 0; to_align32--) {
			*out32 = v32;
			out32++;
		}

		/* Use unsigned divide to turn this into a right shift. */
		lines_left = (unsigned)n32 / CACHE_LINE_SIZE_IN_WORDS;

		do {
			/* Only wh64 a few lines at a time, so we don't
			 * exceed the maximum number of victim lines.
			 */
			int x = ((lines_left < CHIP_MAX_OUTSTANDING_VICTIMS())
				  ? lines_left
				  : CHIP_MAX_OUTSTANDING_VICTIMS());
			uint32_t *wh = out32;
			int i = x;
			int j;

			lines_left -= x;

			/* wh64 each line in this batch before filling it. */
			do {
				__insn_wh64(wh);
				wh += CACHE_LINE_SIZE_IN_WORDS;
			} while (--i);

			for (j = x * (CACHE_LINE_SIZE_IN_WORDS / 4);
			     j != 0; j--) {
				*out32++ = v32;
				*out32++ = v32;
				*out32++ = v32;
				*out32++ = v32;
			}
		} while (lines_left != 0);

		/* We processed all full lines above, so only this many
		 * words remain to be processed.
		 */
		n32 &= CACHE_LINE_SIZE_IN_WORDS - 1;
	}

#endif /* CHIP_HAS_WH64() */

	/* Now handle any leftover values. */
	if (n32 != 0) {
		do {
			*out32 = v32;
			out32++;
		} while (--n32 != 0);
	}

	return s;
}
EXPORT_SYMBOL(memset);
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c new file mode 100644 index 000000000000..485e24d62c6b --- /dev/null +++ b/arch/tile/lib/spinlock_32.c | |||
@@ -0,0 +1,221 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <asm/processor.h> | ||
18 | |||
19 | #include "spinlock_common.h" | ||
20 | |||
/*
 * Take the ticket spinlock.  The low bit of next_ticket doubles as the
 * tns-lock marker (tns sets it to 1), so an odd value from __insn_tns()
 * means another cpu holds the word and we must back off and retry.
 */
void arch_spin_lock(arch_spinlock_t *lock)
{
	int my_ticket;
	int iterations = 0;
	int delta;

	/* Grab the next-ticket word, spinning while it is tns-locked. */
	while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
		delay_backoff(iterations++);

	/* Increment the next ticket number, implicitly releasing tns lock. */
	lock->next_ticket = my_ticket + TICKET_QUANTUM;

	/* Wait until it's our turn; back off longer the further back we are. */
	while ((delta = my_ticket - lock->current_ticket) != 0)
		relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
}
EXPORT_SYMBOL(arch_spin_lock);
38 | |||
/*
 * Try once to take the ticket spinlock.  Returns 1 if the lock was
 * acquired, 0 if it is held (or tns-locked) by someone else.
 */
int arch_spin_trylock(arch_spinlock_t *lock)
{
	/*
	 * Grab a ticket; no need to retry if it's busy, we'll just
	 * treat that the same as "locked", since someone else
	 * will lock it momentarily anyway.
	 */
	int my_ticket = __insn_tns((void *)&lock->next_ticket);

	if (my_ticket == lock->current_ticket) {
		/* Not currently locked, so lock it by keeping this ticket. */
		lock->next_ticket = my_ticket + TICKET_QUANTUM;
		/* Success! */
		return 1;
	}

	if (!(my_ticket & 1)) {
		/* We tns'ed the word ourselves: release next_ticket. */
		lock->next_ticket = my_ticket;
	}

	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock);
63 | |||
/* Spin (with bounded exponential backoff) until the lock is released. */
void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	u32 iterations = 0;
	while (arch_spin_is_locked(lock))
		delay_backoff(iterations++);
}
EXPORT_SYMBOL(arch_spin_unlock_wait);
71 | |||
72 | /* | ||
73 | * The low byte is always reserved to be the marker for a "tns" operation | ||
74 | * since the low bit is set to "1" by a tns. The next seven bits are | ||
75 | * zeroes. The next byte holds the "next" writer value, i.e. the ticket | ||
76 | * available for the next task that wants to write. The third byte holds | ||
77 | * the current writer value, i.e. the writer who holds the current ticket. | ||
78 | * If current == next == 0, there are no interested writers. | ||
79 | */ | ||
80 | #define WR_NEXT_SHIFT _WR_NEXT_SHIFT | ||
81 | #define WR_CURR_SHIFT _WR_CURR_SHIFT | ||
82 | #define WR_WIDTH _WR_WIDTH | ||
83 | #define WR_MASK ((1 << WR_WIDTH) - 1) | ||
84 | |||
85 | /* | ||
86 | * The last eight bits hold the active reader count. This has to be | ||
87 | * zero before a writer can start to write. | ||
88 | */ | ||
89 | #define RD_COUNT_SHIFT _RD_COUNT_SHIFT | ||
90 | #define RD_COUNT_WIDTH _RD_COUNT_WIDTH | ||
91 | #define RD_COUNT_MASK ((1 << RD_COUNT_WIDTH) - 1) | ||
92 | |||
93 | |||
/*
 * Lock the word, spinning until there are no tns-ers.
 * Returns the pre-lock value of the word (low bit clear); the caller
 * is responsible for storing something back to drop the tns bit.
 */
static inline u32 get_rwlock(arch_rwlock_t *rwlock)
{
	u32 iterations = 0;
	for (;;) {
		u32 val = __insn_tns((int *)&rwlock->lock);
		if (unlikely(val & 1)) {
			/* Someone else has the word tns'ed; back off. */
			delay_backoff(iterations++);
			continue;
		}
		return val;
	}
}
107 | |||
/*
 * Slow-path read trylock.  Returns 1 (and bumps the reader count) only
 * when no writer is active or waiting; the store to rwlock->lock also
 * drops the tns bit taken by get_rwlock().
 */
int arch_read_trylock_slow(arch_rwlock_t *rwlock)
{
	u32 val = get_rwlock(rwlock);
	/* Shifting off the high reader bits leaves only the writer fields. */
	int locked = (val << RD_COUNT_WIDTH) == 0;
	rwlock->lock = val + (locked << RD_COUNT_SHIFT);
	return locked;
}
EXPORT_SYMBOL(arch_read_trylock_slow);
116 | |||
/*
 * Slow-path read unlock: tns-lock the word, then write it back with
 * the reader count decremented (which also drops the tns bit).
 */
void arch_read_unlock_slow(arch_rwlock_t *rwlock)
{
	u32 val = get_rwlock(rwlock);
	rwlock->lock = val - (1 << RD_COUNT_SHIFT);
}
EXPORT_SYMBOL(arch_read_unlock_slow);
123 | |||
/*
 * Slow-path write unlock, entered with the just-read lock word in @val.
 * Spin until we hold the tns lock on the word, then advance the
 * "current writer" ticket byte.  If that makes it equal to the "next
 * writer" byte (i.e. no writers are waiting), the whole word is
 * written back as zero instead, returning the lock to its idle state.
 * NOTE(review): assumes __insn_mz(a, b) yields b when a == 0 and 0
 * otherwise -- confirm against the TILE ISA.
 */
void arch_write_unlock_slow(arch_rwlock_t *rwlock, u32 val)
{
	u32 eq, mask = 1 << WR_CURR_SHIFT;
	while (unlikely(val & 1)) {
		/* Limited backoff since we are the highest-priority task. */
		relax(4);
		val = __insn_tns((int *)&rwlock->lock);
	}
	/* SIMD byte-add: bump only the current-writer byte. */
	val = __insn_addb(val, mask);
	/* Byte-compare curr against next (shifted into curr's position). */
	eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT));
	val = __insn_mz(eq & mask, val);
	rwlock->lock = val;
}
EXPORT_SYMBOL(arch_write_unlock_slow);
138 | |||
/*
 * We spin until everything but the reader bits (which are in the high
 * part of the word) are zero, i.e. no active or waiting writers, no tns.
 *
 * ISSUE: This approach can permanently starve readers.  A reader who sees
 * a writer could instead take a ticket lock (just like a writer would),
 * and atomically enter read mode (with 1 reader) when it gets the ticket.
 * This way both readers and writers will always make forward progress
 * in a finite time.
 */
void arch_read_lock_slow(arch_rwlock_t *rwlock, u32 val)
{
	u32 iterations = 0;
	do {
		/* If we tns'ed the word (low bit clear), restore it. */
		if (!(val & 1))
			rwlock->lock = val;
		delay_backoff(iterations++);
		val = __insn_tns((int *)&rwlock->lock);
	} while ((val << RD_COUNT_WIDTH) != 0);	/* writers or tns present */
	/* We hold the tns lock and no writers remain: become a reader. */
	rwlock->lock = val + (1 << RD_COUNT_SHIFT);
}
EXPORT_SYMBOL(arch_read_lock_slow);
161 | |||
/*
 * Slow-path write lock, entered with the just-read lock word in @val.
 * Take a ticket from the "next writer" byte (which also blocks new
 * readers, per the layout comment above), then spin until our ticket
 * is current and all readers have drained.
 */
void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val)
{
	/*
	 * The trailing underscore on this variable (and curr_ below)
	 * reminds us that the high bits are garbage; we mask them out
	 * when we compare them.
	 */
	u32 my_ticket_;

	/* Take out the next ticket; this will also stop would-be readers. */
	if (val & 1)
		val = get_rwlock(rwlock);
	rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);

	/* Extract my ticket value from the original word. */
	my_ticket_ = val >> WR_NEXT_SHIFT;

	/*
	 * Wait until the "current" field matches our ticket, and
	 * there are no remaining readers.
	 */
	for (;;) {
		u32 curr_ = val >> WR_CURR_SHIFT;
		u32 readers = val >> RD_COUNT_SHIFT;
		/* Writers still ahead of us, plus one if readers remain. */
		u32 delta = ((my_ticket_ - curr_) & WR_MASK) + !!readers;
		if (likely(delta == 0))
			break;

		/* Delay based on how many lock-holders are still out there. */
		relax((256 / CYCLES_PER_RELAX_LOOP) * delta);

		/*
		 * Get a non-tns value to check; we don't need to tns
		 * it ourselves.  Since we're not tns'ing, we retry
		 * more rapidly to get a valid value.
		 */
		while ((val = rwlock->lock) & 1)
			relax(4);
	}
}
EXPORT_SYMBOL(arch_write_lock_slow);
203 | |||
/*
 * Acquire the tns pseudo-lock guarding an atomic_t: enter the
 * interrupt critical section (asserting we aren't already in one),
 * then spin tns'ing the counter word until we stop seeing the tns
 * marker value (1).  Returns the counter's pre-lock value, which the
 * caller hands back (possibly updated) to __tns_atomic_release().
 */
int __tns_atomic_acquire(atomic_t *lock)
{
	int ret;
	u32 iterations = 0;

	BUG_ON(__insn_mfspr(SPR_INTERRUPT_CRITICAL_SECTION));
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);

	while ((ret = __insn_tns((void *)&lock->counter)) == 1)
		delay_backoff(iterations++);
	return ret;
}
216 | |||
/*
 * Release the pseudo-lock taken by __tns_atomic_acquire(): store the
 * new counter value @v (overwriting the tns marker) and leave the
 * interrupt critical section.
 */
void __tns_atomic_release(atomic_t *p, int v)
{
	p->counter = v;
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
}
diff --git a/arch/tile/lib/spinlock_common.h b/arch/tile/lib/spinlock_common.h new file mode 100644 index 000000000000..c10109809132 --- /dev/null +++ b/arch/tile/lib/spinlock_common.h | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * This file is included into spinlock_32.c or _64.c. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * The mfspr in __spinlock_relax() is 5 or 6 cycles plus 2 for loop | ||
18 | * overhead. | ||
19 | */ | ||
20 | #ifdef __tilegx__ | ||
21 | #define CYCLES_PER_RELAX_LOOP 7 | ||
22 | #else | ||
23 | #define CYCLES_PER_RELAX_LOOP 8 | ||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * Idle the core for CYCLES_PER_RELAX_LOOP * iterations cycles. | ||
28 | */ | ||
29 | static inline void | ||
30 | relax(int iterations) | ||
31 | { | ||
32 | for (/*above*/; iterations > 0; iterations--) | ||
33 | __insn_mfspr(SPR_PASS); | ||
34 | barrier(); | ||
35 | } | ||
36 | |||
37 | /* Perform bounded exponential backoff.*/ | ||
38 | static void delay_backoff(int iterations) | ||
39 | { | ||
40 | u32 exponent, loops; | ||
41 | |||
42 | /* | ||
43 | * 2^exponent is how many times we go around the loop, | ||
44 | * which takes 8 cycles. We want to start with a 16- to 31-cycle | ||
45 | * loop, so we need to go around minimum 2 = 2^1 times, so we | ||
46 | * bias the original value up by 1. | ||
47 | */ | ||
48 | exponent = iterations + 1; | ||
49 | |||
50 | /* | ||
51 | * Don't allow exponent to exceed 7, so we have 128 loops, | ||
52 | * or 1,024 (to 2,047) cycles, as our maximum. | ||
53 | */ | ||
54 | if (exponent > 8) | ||
55 | exponent = 8; | ||
56 | |||
57 | loops = 1 << exponent; | ||
58 | |||
59 | /* Add a randomness factor so two cpus never get in lock step. */ | ||
60 | loops += __insn_crc32_32(stack_pointer, get_cycles_low()) & | ||
61 | (loops - 1); | ||
62 | |||
63 | relax(1 << exponent); | ||
64 | } | ||
diff --git a/arch/tile/lib/strchr_32.c b/arch/tile/lib/strchr_32.c new file mode 100644 index 000000000000..c94e6f7ae7b5 --- /dev/null +++ b/arch/tile/lib/strchr_32.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
19 | #undef strchr | ||
20 | |||
/*
 * Word-at-a-time strchr(): examines four bytes per iteration using the
 * tile byte-compare (seqb) and count-trailing-zeros (ctz) intrinsics.
 * Matches C library semantics, including c == '\0' returning a pointer
 * to the terminator.
 */
char *strchr(const char *s, int c)
{
	int z, g;

	/* Get an aligned pointer. */
	const uintptr_t s_int = (uintptr_t) s;
	const uint32_t *p = (const uint32_t *)(s_int & -4);

	/* Create four copies of the byte for which we are looking. */
	const uint32_t goal = 0x01010101 * (uint8_t) c;

	/* Read the first aligned word, but force bytes before the string to
	 * match neither zero nor goal (we make sure the high bit of each
	 * byte is 1, and the low 7 bits are all the opposite of the goal
	 * byte).
	 *
	 * Note that this shift count expression works because we know shift
	 * counts are taken mod 32.
	 */
	const uint32_t before_mask = (1 << (s_int << 3)) - 1;
	uint32_t v = (*p | before_mask) ^ (goal & __insn_shrib(before_mask, 1));

	uint32_t zero_matches, goal_matches;
	while (1) {
		/* Look for a terminating '\0'. */
		zero_matches = __insn_seqb(v, 0);

		/* Look for the goal byte. */
		goal_matches = __insn_seqb(v, goal);

		if (__builtin_expect(zero_matches | goal_matches, 0))
			break;

		v = *++p;
	}

	/*
	 * Bit offsets of the first '\0' and first goal byte within the word.
	 * NOTE(review): assumes byte 0 of the word is the lowest-order bits
	 * (little-endian byte numbering) -- confirm against the chip docs.
	 */
	z = __insn_ctz(zero_matches);
	g = __insn_ctz(goal_matches);

	/* If we found c before '\0' we got a match.  Note that if c == '\0'
	 * then g == z, and we correctly return the address of the '\0'
	 * rather than NULL.
	 */
	return (g <= z) ? ((char *)p) + (g >> 3) : NULL;
}
diff --git a/arch/tile/lib/strlen_32.c b/arch/tile/lib/strlen_32.c new file mode 100644 index 000000000000..f26f88e11e4a --- /dev/null +++ b/arch/tile/lib/strlen_32.c | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
/*
 * Word-at-a-time strlen(): reads four bytes per iteration from the
 * aligned word containing the string, using seqb to spot a zero byte
 * and ctz to locate it within the word.
 */
size_t strlen(const char *s)
{
	/* Get an aligned pointer. */
	const uintptr_t s_int = (uintptr_t) s;
	const uint32_t *p = (const uint32_t *)(s_int & -4);

	/* Read the first word, but force bytes before the string to be nonzero.
	 * This expression works because we know shift counts are taken mod 32.
	 */
	uint32_t v = *p | ((1 << (s_int << 3)) - 1);

	/* Scan whole words until one contains a zero byte. */
	uint32_t bits;
	while ((bits = __insn_seqb(v, 0)) == 0)
		v = *++p;

	/* ctz(bits) >> 3 is the byte index of the NUL within the word. */
	return ((const char *)p) + (__insn_ctz(bits) >> 3) - s;
}
diff --git a/arch/tile/lib/uaccess.c b/arch/tile/lib/uaccess.c new file mode 100644 index 000000000000..f8d398c9ee7f --- /dev/null +++ b/arch/tile/lib/uaccess.c | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/module.h> | ||
17 | |||
/*
 * Validate a user-supplied address range.  Returns 0 if [addr, addr+size)
 * lies entirely below the task's addr_limit or is an arch-mappable
 * range, and nonzero otherwise -- note the inverted sense: zero means
 * the range is OK.
 */
int __range_ok(unsigned long addr, unsigned long size)
{
	unsigned long limit = current_thread_info()->addr_limit.seg;
	/* "size <= limit - addr" avoids overflow in computing addr + size. */
	return !((addr < limit && size <= limit - addr) ||
		is_arch_mappable_range(addr, size));
}
EXPORT_SYMBOL(__range_ok);
25 | |||
#ifdef CONFIG_DEBUG_COPY_FROM_USER
/*
 * Out-of-line target for the debug copy_from_user() bounds check.
 * NOTE(review): the caller is not visible in this file; presumably the
 * uaccess header branches here when the destination buffer is provably
 * too small.  We just warn loudly.
 */
void copy_from_user_overflow(void)
{
	WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);
#endif
diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S new file mode 100644 index 000000000000..979f76d83746 --- /dev/null +++ b/arch/tile/lib/usercopy_32.S | |||
@@ -0,0 +1,223 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <asm/errno.h> | ||
17 | #include <asm/cache.h> | ||
18 | #include <arch/chip.h> | ||
19 | |||
/* Access user memory, but use MMU to avoid propagating kernel exceptions. */

	.pushsection .fixup,"ax"

/*
 * Common fault targets for the __ex_table entries below.  On a faulting
 * user access the trap handler redirects the pc here, and we report
 * -EFAULT (in r2 for the get_user family, r0 for put_user) instead of
 * oopsing in the kernel.
 */
get_user_fault:
	{ move r0, zero; move r1, zero }
	{ movei r2, -EFAULT; jrp lr }
	ENDPROC(get_user_fault)

put_user_fault:
	{ movei r0, -EFAULT; jrp lr }
	ENDPROC(put_user_fault)

	.popsection
34 | |||
/*
 * __get_user_N functions take a pointer in r0, and return 0 in r2
 * on success, with the value in r0; or else -EFAULT in r2.
 * The "1:" label of each instance is recorded in __ex_table, so a
 * fault on the user load diverts to get_user_fault above.
 */
#define __get_user_N(bytes, LOAD) \
	STD_ENTRY(__get_user_##bytes); \
1:	{ LOAD r0, r0; move r1, zero; move r2, zero }; \
	jrp lr; \
	STD_ENDPROC(__get_user_##bytes); \
	.pushsection __ex_table,"a"; \
	.word 1b, get_user_fault; \
	.popsection

/* 1-, 2- and 4-byte flavors, using the correspondingly sized user load. */
__get_user_N(1, lb_u)
__get_user_N(2, lh_u)
__get_user_N(4, lw)
51 | |||
/*
 * __get_user_8 takes a pointer in r0, and returns 0 in r2
 * on success, with the value in r0/r1; or else -EFAULT in r2.
 * NOTE(review): the bundle at 1: relies on the addi reading r0's
 * pre-load value (all bundle reads before writes) -- confirm against
 * the ISA spec.
 */
STD_ENTRY(__get_user_8);
1:	{ lw r0, r0; addi r1, r0, 4 };
2:	{ lw r1, r1; move r2, zero };
	jrp lr;
	STD_ENDPROC(__get_user_8);
	.pushsection __ex_table,"a";
	.word 1b, get_user_fault;
	.word 2b, get_user_fault;
	.popsection
65 | |||
/*
 * __put_user_N functions take a value in r0 and a pointer in r1,
 * and return 0 in r0 on success or -EFAULT on failure.
 * A faulting user store at "1:" diverts to put_user_fault above.
 */
#define __put_user_N(bytes, STORE) \
	STD_ENTRY(__put_user_##bytes); \
1:	{ STORE r1, r0; move r0, zero }; \
	jrp lr; \
	STD_ENDPROC(__put_user_##bytes); \
	.pushsection __ex_table,"a"; \
	.word 1b, put_user_fault; \
	.popsection

/* 1-, 2- and 4-byte flavors, using the correspondingly sized user store. */
__put_user_N(1, sb)
__put_user_N(2, sh)
__put_user_N(4, sw)
82 | |||
/*
 * __put_user_8 takes a value in r0/r1 and a pointer in r2,
 * and returns 0 in r0 on success or -EFAULT on failure.
 * Two stores, each with its own __ex_table entry, so a fault in
 * either half reports -EFAULT.
 */
STD_ENTRY(__put_user_8)
1:	{ sw r2, r0; addi r2, r2, 4 }
2:	{ sw r2, r1; move r0, zero }
	jrp lr
	STD_ENDPROC(__put_user_8)
	.pushsection __ex_table,"a"
	.word 1b, put_user_fault
	.word 2b, put_user_fault
	.popsection
96 | |||
97 | |||
/*
 * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
 * It returns the length, including the terminating NUL, or zero on exception.
 * If length is greater than the bound, returns one plus the bound.
 */
STD_ENTRY(strnlen_user_asm)
	/* r3 = start - 1, so the final "r0 - r3" counts the NUL byte too. */
	{ bz r1, 2f; addi r3, r0, -1 }	/* bias down to include NUL */
1:	{ lb_u r4, r0; addi r1, r1, -1 }
	bz r4, 2f
	{ bnzt r1, 1b; addi r0, r0, 1 }
2:	{ sub r0, r0, r3; jrp lr }
	STD_ENDPROC(strnlen_user_asm)
	.pushsection .fixup,"ax"
strnlen_user_fault:
	/* Faulting user load: report length 0. */
	{ move r0, zero; jrp lr }
	ENDPROC(strnlen_user_fault)
	.section __ex_table,"a"
	.word 1b, strnlen_user_fault
	.popsection
117 | |||
/*
 * strncpy_from_user_asm takes the kernel target pointer in r0,
 * the userspace source pointer in r1, and the length bound (including
 * the trailing NUL) in r2.  On success, it returns the string length
 * (not including the trailing NUL), or -EFAULT on failure.
 */
STD_ENTRY(strncpy_from_user_asm)
	{ bz r2, 2f; move r3, r0 }	/* r3 remembers the destination start */
1:	{ lb_u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
	{ sb r0, r4; addi r0, r0, 1 }
	/* Bound exhausted: return the full count (no NUL was copied). */
	bz r2, 2f
	bnzt r4, 1b
	addi r0, r0, -1	/* don't count the trailing NUL */
2:	{ sub r0, r0, r3; jrp lr }
	STD_ENDPROC(strncpy_from_user_asm)
	.pushsection .fixup,"ax"
strncpy_from_user_fault:
	/* Faulting user load: report -EFAULT. */
	{ movei r0, -EFAULT; jrp lr }
	ENDPROC(strncpy_from_user_fault)
	.section __ex_table,"a"
	.word 1b, strncpy_from_user_fault
	.popsection
140 | |||
/*
 * clear_user_asm takes the user target address in r0 and the
 * number of bytes to zero in r1.
 * It returns the number of uncopiable bytes (hopefully zero) in r0.
 * Note that we don't use a separate .fixup section here since we fall
 * through into the "fixup" code as the last straight-line bundle anyway.
 */
STD_ENTRY(clear_user_asm)
	/* If address and length are both word-aligned, take the fast path. */
	{ bz r1, 2f; or r2, r0, r1 }
	andi r2, r2, 3
	bzt r2, .Lclear_aligned_user_asm
	/* Byte-at-a-time path for unaligned address or length. */
1:	{ sb r0, zero; addi r0, r0, 1; addi r1, r1, -1 }
	bnzt r1, 1b
2:	{ move r0, r1; jrp lr }
	.pushsection __ex_table,"a"
	.word 1b, 2b
	.popsection

.Lclear_aligned_user_asm:
	/* Word-at-a-time path; r1 is a multiple of 4 here. */
1:	{ sw r0, zero; addi r0, r0, 4; addi r1, r1, -4 }
	bnzt r1, 1b
2:	{ move r0, r1; jrp lr }
	STD_ENDPROC(clear_user_asm)
	.pushsection __ex_table,"a"
	.word 1b, 2b
	.popsection
167 | |||
/*
 * flush_user_asm takes the user target address in r0 and the
 * number of bytes to flush in r1.
 * It returns the number of unflushable bytes (hopefully zero) in r0.
 * The range is first widened to whole L2 cache lines.
 */
STD_ENTRY(flush_user_asm)
	bz r1, 2f
	{ movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
	{ sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
	/* r0 = start rounded down, r1 = end rounded up, to line boundaries. */
	{ and r0, r0, r2; and r1, r1, r2 }
	{ sub r1, r1, r0 }
1:	{ flush r0; addi r1, r1, -CHIP_FLUSH_STRIDE() }
	{ addi r0, r0, CHIP_FLUSH_STRIDE(); bnzt r1, 1b }
2:	{ move r0, r1; jrp lr }
	STD_ENDPROC(flush_user_asm)
	.pushsection __ex_table,"a"
	.word 1b, 2b
	.popsection
186 | |||
/*
 * inv_user_asm takes the user target address in r0 and the
 * number of bytes to invalidate in r1.
 * It returns the number of not inv'able bytes (hopefully zero) in r0.
 * Structure mirrors flush_user_asm: round to L2 lines, then walk by
 * the chip's invalidate stride.
 */
STD_ENTRY(inv_user_asm)
	bz r1, 2f
	{ movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
	{ sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
	{ and r0, r0, r2; and r1, r1, r2 }
	{ sub r1, r1, r0 }
1:	{ inv r0; addi r1, r1, -CHIP_INV_STRIDE() }
	{ addi r0, r0, CHIP_INV_STRIDE(); bnzt r1, 1b }
2:	{ move r0, r1; jrp lr }
	STD_ENDPROC(inv_user_asm)
	.pushsection __ex_table,"a"
	.word 1b, 2b
	.popsection
205 | |||
/*
 * finv_user_asm takes the user target address in r0 and the
 * number of bytes to flush-invalidate in r1.
 * It returns the number of not finv'able bytes (hopefully zero) in r0.
 * Structure mirrors flush_user_asm: round to L2 lines, then walk by
 * the chip's flush-invalidate stride.
 */
STD_ENTRY(finv_user_asm)
	bz r1, 2f
	{ movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
	{ sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
	{ and r0, r0, r2; and r1, r1, r2 }
	{ sub r1, r1, r0 }
1:	{ finv r0; addi r1, r1, -CHIP_FINV_STRIDE() }
	{ addi r0, r0, CHIP_FINV_STRIDE(); bnzt r1, 1b }
2:	{ move r0, r1; jrp lr }
	STD_ENDPROC(finv_user_asm)
	.pushsection __ex_table,"a"
	.word 1b, 2b
	.popsection
diff --git a/arch/tile/mm/Makefile b/arch/tile/mm/Makefile new file mode 100644 index 000000000000..e252aeddc17d --- /dev/null +++ b/arch/tile/mm/Makefile | |||
@@ -0,0 +1,9 @@ | |||
#
# Makefile for the linux tile-specific parts of the memory manager.
#

# migrate_$(BITS).o selects the 32- or 64-bit cache-migration helpers.
obj-y := init.o pgtable.o fault.o extable.o elf.o \
	 mmap.o homecache.o migrate_$(BITS).o

obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_HIGHMEM) += highmem.o
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c new file mode 100644 index 000000000000..55e58e93bfc5 --- /dev/null +++ b/arch/tile/mm/elf.c | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <linux/pagemap.h> | ||
17 | #include <linux/binfmts.h> | ||
18 | #include <linux/compat.h> | ||
19 | #include <linux/mman.h> | ||
20 | #include <linux/elf.h> | ||
21 | #include <asm/pgtable.h> | ||
22 | #include <asm/pgalloc.h> | ||
23 | #include <asm/sections.h> | ||
24 | |||
25 | /* Notify a running simulator, if any, that an exec just occurred. */ | ||
26 | static void sim_notify_exec(const char *binary_name) | ||
27 | { | ||
28 | unsigned char c; | ||
29 | do { | ||
30 | c = *binary_name++; | ||
31 | __insn_mtspr(SPR_SIM_CONTROL, | ||
32 | (SIM_CONTROL_OS_EXEC | ||
33 | | (c << _SIM_CONTROL_OPERATOR_BITS))); | ||
34 | |||
35 | } while (c); | ||
36 | } | ||
37 | |||
/*
 * Find the executable mapping in the current mm, resolve its file path,
 * and forward the path to the simulator.  Returns nonzero on success,
 * zero if no notification was sent (the caller then falls back to the
 * linux_binprm filename).
 */
static int notify_exec(void)
{
	int retval = 0;  /* failure */
	struct vm_area_struct *vma = current->mm->mmap;
	/* Walk the VMA list for the one backing the executable image. */
	while (vma) {
		if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
			break;
		vma = vma->vm_next;
	}
	if (vma) {
		/* d_path() needs a scratch buffer; borrow one page. */
		char *buf = (char *) __get_free_page(GFP_KERNEL);
		if (buf) {
			char *path = d_path(&vma->vm_file->f_path,
					    buf, PAGE_SIZE);
			if (!IS_ERR(path)) {
				sim_notify_exec(path);
				retval = 1;
			}
			free_page((unsigned long)buf);
		}
	}
	return retval;
}
61 | |||
/* Notify a running simulator, if any, that we loaded an interpreter,
 * streaming the load address one byte at a time, lowest byte first,
 * through SPR_SIM_CONTROL.
 */
static void sim_notify_interp(unsigned long load_addr)
{
	size_t i;
	for (i = 0; i < sizeof(load_addr); i++) {
		unsigned char c = load_addr >> (i * 8);
		__insn_mtspr(SPR_SIM_CONTROL,
			     (SIM_CONTROL_OS_INTERP
			      | (c << _SIM_CONTROL_OPERATOR_BITS)));
	}
}
73 | |||
74 | |||
75 | /* Kernel address of page used to map read-only kernel data into userspace. */ | ||
76 | static void *vdso_page; | ||
77 | |||
78 | /* One-entry array used for install_special_mapping. */ | ||
79 | static struct page *vdso_pages[1]; | ||
80 | |||
81 | static int __init vdso_setup(void) | ||
82 | { | ||
83 | vdso_page = (void *)get_zeroed_page(GFP_ATOMIC); | ||
84 | memcpy(vdso_page, __rt_sigreturn, __rt_sigreturn_end - __rt_sigreturn); | ||
85 | vdso_pages[0] = virt_to_page(vdso_page); | ||
86 | return 0; | ||
87 | } | ||
88 | device_initcall(vdso_setup); | ||
89 | |||
90 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
91 | { | ||
92 | if (vma->vm_private_data == vdso_pages) | ||
93 | return "[vdso]"; | ||
94 | #ifndef __tilegx__ | ||
95 | if (vma->vm_start == MEM_USER_INTRPT) | ||
96 | return "[intrpt]"; | ||
97 | #endif | ||
98 | return NULL; | ||
99 | } | ||
100 | |||
/*
 * Install the arch-specific user mappings for a newly exec'd process:
 * the vdso page (rt_sigreturn trampoline) at VDSO_BASE, and on 32-bit
 * chips a placeholder for the user-interrupt region.  Also notifies an
 * attached simulator of the exec.  Returns 0 or a negative errno from
 * the mapping calls.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base;
	int retval = 0;

	/*
	 * Notify the simulator that an exec just occurred.
	 * If we can't find the filename of the mapping, just use
	 * whatever was passed as the linux_binprm filename.
	 */
	if (!notify_exec())
		sim_notify_exec(bprm->filename);

	down_write(&mm->mmap_sem);

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 *
	 * Make sure the vDSO gets into every core dump.  Dumping its
	 * contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to
	 * see what PC values meant.
	 */
	vdso_base = VDSO_BASE;
	retval = install_special_mapping(mm, vdso_base, PAGE_SIZE,
					 VM_READ|VM_EXEC|
					 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
					 VM_ALWAYSDUMP,
					 vdso_pages);

#ifndef __tilegx__
	/*
	 * Set up a user-interrupt mapping here; the user can't
	 * create one themselves since it is above TASK_SIZE.
	 * We make it unwritable by default, so the model for adding
	 * interrupt vectors always involves an mprotect.
	 */
	if (!retval) {
		unsigned long addr = MEM_USER_INTRPT;
		addr = mmap_region(NULL, addr, INTRPT_SIZE,
				   MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE,
				   VM_READ|VM_EXEC|
				   VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0);
		/* mmap_region() returns an address; high values are errnos. */
		if (addr > (unsigned long) -PAGE_SIZE)
			retval = (int) addr;
	}
#endif

	up_write(&mm->mmap_sem);

	return retval;
}
155 | |||
156 | |||
157 | void elf_plat_init(struct pt_regs *regs, unsigned long load_addr) | ||
158 | { | ||
159 | /* Zero all registers. */ | ||
160 | memset(regs, 0, sizeof(*regs)); | ||
161 | |||
162 | /* Report the interpreter's load address. */ | ||
163 | sim_notify_interp(load_addr); | ||
164 | } | ||
diff --git a/arch/tile/mm/extable.c b/arch/tile/mm/extable.c new file mode 100644 index 000000000000..4fb0acb9d154 --- /dev/null +++ b/arch/tile/mm/extable.c | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | |||
19 | int fixup_exception(struct pt_regs *regs) | ||
20 | { | ||
21 | const struct exception_table_entry *fixup; | ||
22 | |||
23 | fixup = search_exception_tables(regs->pc); | ||
24 | if (fixup) { | ||
25 | regs->pc = fixup->fixup; | ||
26 | return 1; | ||
27 | } | ||
28 | |||
29 | return 0; | ||
30 | } | ||
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c new file mode 100644 index 000000000000..0011f06b4fe2 --- /dev/null +++ b/arch/tile/mm/fault.c | |||
@@ -0,0 +1,867 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * From i386 code copyright (C) 1995 Linus Torvalds | ||
15 | */ | ||
16 | |||
17 | #include <linux/signal.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/ptrace.h> | ||
24 | #include <linux/mman.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <linux/smp_lock.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/tty.h> | ||
31 | #include <linux/vt_kern.h> /* For unblank_screen() */ | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/kprobes.h> | ||
35 | #include <linux/hugetlb.h> | ||
36 | #include <linux/syscalls.h> | ||
37 | #include <linux/uaccess.h> | ||
38 | |||
39 | #include <asm/system.h> | ||
40 | #include <asm/pgalloc.h> | ||
41 | #include <asm/sections.h> | ||
42 | #include <asm/traps.h> | ||
43 | #include <asm/syscalls.h> | ||
44 | |||
45 | #include <arch/interrupts.h> | ||
46 | |||
/*
 * Deliver a synchronous fault signal to the given task, with full
 * siginfo including the faulting address and trap number.  A fault
 * signal aimed at init or the idle task is unrecoverable, so panic
 * with the details instead.
 */
static noinline void force_sig_info_fault(int si_signo, int si_code,
	unsigned long address, int fault_num, struct task_struct *tsk)
{
	siginfo_t info;

	/* pid 0 is the idle task, pid 1 is init; neither may take the signal. */
	if (unlikely(tsk->pid < 2)) {
		panic("Signal %d (code %d) at %#lx sent to %s!",
		      si_signo, si_code & 0xffff, address,
		      tsk->pid ? "init" : "the idle task");
	}

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	info.si_trapno = fault_num;
	force_sig_info(si_signo, &info, tsk);
}
65 | |||
66 | #ifndef __tilegx__ | ||
67 | /* | ||
68 | * Synthesize the fault a PL0 process would get by doing a word-load of | ||
69 | * an unaligned address or a high kernel address. Called indirectly | ||
70 | * from sys_cmpxchg() in kernel/intvec.S. | ||
71 | */ | ||
int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
{
	/*
	 * A kernel-range address looks like a TLB miss to userspace;
	 * anything else reaching here was an unaligned access.
	 */
	if (address >= PAGE_OFFSET)
		force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
				     INT_DTLB_MISS, current);
	else
		force_sig_info_fault(SIGBUS, BUS_ADRALN, address,
				     INT_UNALIGN_DATA, current);

	/*
	 * Adjust pc to point at the actual instruction, which is unusual
	 * for syscalls normally, but is appropriate when we are claiming
	 * that a syscall swint1 caused a page fault or bus error.
	 */
	regs->pc -= 8;

	/*
	 * Mark this as a caller-save interrupt, like a normal page fault,
	 * so that when we go through the signal handler path we will
	 * properly restore r0, r1, and r2 for the signal handler arguments.
	 */
	regs->flags |= PT_FLAGS_CALLER_SAVES;

	return 0;
}
97 | #endif | ||
98 | |||
/*
 * Copy the kernel (init_mm) page-table entry covering "address" into
 * the given pgd, so a vmalloc-area fault can be resolved without taking
 * mm locks.  Returns the kernel pmd for the address, or NULL if the
 * kernel has no mapping there at some level.
 */
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		/* Install the kernel's pmd into this task's tables. */
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		/* Already present: it must agree with the kernel's entry. */
		BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
	return pmd_k;
}
128 | |||
/*
 * Handle a fault on the vmalloc or module mapping area.
 * Returns 0 if the fault was resolved by syncing in the kernel
 * mapping, -1 if the address is outside the vmalloc range or has no
 * valid kernel mapping.
 */
static inline int vmalloc_fault(pgd_t *pgd, unsigned long address)
{
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pmd_k = vmalloc_sync_one(pgd, address);
	if (!pmd_k)
		return -1;
	if (pmd_huge(*pmd_k))
		return 0; /* support TILE huge_vmap() API */
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}
155 | |||
/* Wait until this PTE has completed migration.  Spins (no sleeping --
 * we may be in interrupt context), and panics if the migration never
 * completes within roughly a clock-rate's worth of iterations.
 */
static void wait_for_migration(pte_t *pte)
{
	if (pte_migrating(*pte)) {
		/*
		 * Wait until the migrater fixes up this pte.
		 * We scale the loop count by the clock rate so we'll wait for
		 * a few seconds here.
		 */
		int retries = 0;
		int bound = get_clock_rate();
		while (pte_migrating(*pte)) {
			barrier();
			if (++retries > bound)
				panic("Hit migrating PTE (%#llx) and"
				      " page PFN %#lx still migrating",
				      pte->val, pte_pfn(*pte));
		}
	}
}
176 | |||
/*
 * It's not generally safe to use "current" to get the page table pointer,
 * since we might be running an oprofile interrupt in the middle of a
 * task switch.  Instead ask the hypervisor which page table is actually
 * installed and map its PFN back to a kernel virtual address.
 */
static pgd_t *get_current_pgd(void)
{
	HV_Context ctx = hv_inquire_context();
	unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
	struct page *pgd_page = pfn_to_page(pgd_pfn);
	/* The pgd must be direct-mapped for __va() to be valid. */
	BUG_ON(PageHighMem(pgd_page));   /* oops, HIGHPTE? */
	return (pgd_t *) __va(ctx.page_table);
}
190 | |||
/*
 * We can receive a page fault from a migrating PTE at any time.
 * Handle it by just waiting until the fault resolves.
 *
 * It's also possible to get a migrating kernel PTE that resolves
 * itself during the downcall from hypervisor to Linux.  We just check
 * here to see if the PTE seems valid, and if so we retry it.
 *
 * NOTE! We MUST NOT take any locks for this case.  We may be in an
 * interrupt or a critical region, and must do as little as possible.
 * Similarly, we can't use atomic ops here, since we may be handling a
 * fault caused by an atomic op access.
 *
 * Returns 1 if the fault should simply be retried (migration waited
 * out, or a kernel PTE that already grants the required access), 0 to
 * fall through to the normal fault-handling path.
 */
static int handle_migrating_pte(pgd_t *pgd, int fault_num,
				unsigned long address,
				int is_kernel_mode, int write)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;

	if (pgd_addr_invalid(address))
		return 0;

	/* Lock-free walk down to the PTE (or huge-page pmd-as-pte). */
	pgd += pgd_index(address);
	pud = pud_offset(pgd, address);
	if (!pud || !pud_present(*pud))
		return 0;
	pmd = pmd_offset(pud, address);
	if (!pmd || !pmd_present(*pmd))
		return 0;
	pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) :
		pte_offset_kernel(pmd, address);
	pteval = *pte;
	if (pte_migrating(pteval)) {
		wait_for_migration(pte);
		return 1;
	}

	if (!is_kernel_mode || !pte_present(pteval))
		return 0;
	/*
	 * Kernel-mode fault on a present PTE: if the PTE already permits
	 * the attempted access, the migration must have resolved during
	 * the downcall -- retry the instruction.
	 */
	if (fault_num == INT_ITLB_MISS) {
		if (pte_exec(pteval))
			return 1;
	} else if (write) {
		if (pte_write(pteval))
			return 1;
	} else {
		if (pte_read(pteval))
			return 1;
	}

	return 0;
}
246 | |||
/*
 * This routine is responsible for faulting in user pages.
 * It passes the work off to one of the appropriate routines.
 * It returns true if the fault was successfully handled.
 *
 * @regs:          trap-time register state.
 * @fault_num:     the INT_* interrupt number that fired.
 * @is_page_fault: true for a TLB miss, false for an access violation
 *                 (on TILE, protection faults are always writes).
 * @address:       faulting virtual address.
 * @write:         nonzero if the access was a write.
 */
static int handle_page_fault(struct pt_regs *regs,
			     int fault_num,
			     int is_page_fault,
			     unsigned long address,
			     int write)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long stack_offset;
	int fault;
	int si_code;
	int is_kernel_mode;
	pgd_t *pgd;

	/* on TILE, protection faults are always writes */
	if (!is_page_fault)
		write = 1;

	is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);

	tsk = validate_current();

	/*
	 * Check to see if we might be overwriting the stack, and bail
	 * out if so.  The page fault code is a relatively likely
	 * place to get trapped in an infinite regress, and once we
	 * overwrite the whole stack, it becomes very hard to recover.
	 */
	stack_offset = stack_pointer & (THREAD_SIZE-1);
	if (stack_offset < THREAD_SIZE / 8) {
		pr_alert("Potential stack overrun: sp %#lx\n",
		       stack_pointer);
		show_regs(regs);
		pr_alert("Killing current process %d/%s\n",
		       tsk->pid, tsk->comm);
		do_group_exit(SIGKILL);
	}

	/*
	 * Early on, we need to check for migrating PTE entries;
	 * see homecache.c.  If we find a migrating PTE, we wait until
	 * the backing page claims to be done migrating, then we proceed.
	 * For kernel PTEs, we rewrite the PTE and return and retry.
	 * Otherwise, we treat the fault like a normal "no PTE" fault,
	 * rather than trying to patch up the existing PTE.
	 */
	pgd = get_current_pgd();
	if (handle_migrating_pte(pgd, fault_num, address,
				 is_kernel_mode, write))
		return 1;

	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection fault.
	 */
	if (unlikely(address >= TASK_SIZE &&
		     !is_arch_mappable_range(address, 0))) {
		if (is_kernel_mode && is_page_fault &&
		    vmalloc_fault(pgd, address) >= 0)
			return 1;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		mm = NULL;	/* happy compiler */
		vma = NULL;
		goto bad_area_nosemaphore;
	}

	/*
	 * If we're trying to touch user-space addresses, we must
	 * be either at PL0, or else with interrupts enabled in the
	 * kernel, so either way we can re-enable interrupts here.
	 */
	local_irq_enable();

	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm) {
		vma = NULL;  /* happy compiler */
		goto bad_area_nosemaphore;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if (is_kernel_mode &&
		    !search_exception_tables(regs->pc)) {
			vma = NULL;  /* happy compiler */
			goto bad_area_nosemaphore;
		}
		down_read(&mm->mmap_sem);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (regs->sp < PAGE_OFFSET) {
		/*
		 * accessing the stack below sp is always a bug.
		 */
		if (address < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	si_code = SEGV_ACCERR;
	if (fault_num == INT_ITLB_MISS) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (write) {
#ifdef TEST_VERIFY_AREA
		if (!is_page_fault && regs->cs == KERNEL_CS)
			pr_err("WP fault at "REGFMT"\n", regs->eip);
#endif
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!is_page_fault || !(vma->vm_flags & VM_READ))
			goto bad_area;
	}

 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;

#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
	/*
	 * If this was an asynchronous fault,
	 * restart the appropriate engine.
	 */
	switch (fault_num) {
#if CHIP_HAS_TILE_DMA()
	case INT_DMATLB_MISS:
	case INT_DMATLB_MISS_DWNCL:
	case INT_DMATLB_ACCESS:
	case INT_DMATLB_ACCESS_DWNCL:
		__insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
		break;
#endif
#if CHIP_HAS_SN_PROC()
	case INT_SNITLB_MISS:
	case INT_SNITLB_MISS_DWNCL:
		__insn_mtspr(SPR_SNCTL,
			     __insn_mfspr(SPR_SNCTL) &
			     ~SPR_SNCTL__FRZPROC_MASK);
		break;
#endif
	}
#endif

	up_read(&mm->mmap_sem);
	return 1;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (!is_kernel_mode) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		force_sig_info_fault(SIGSEGV, si_code, address,
				     fault_num, tsk);
		return 0;
	}

no_context:
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return 0;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	/* FIXME: no lookup_address() yet */
#ifdef SUPPORT_LOOKUP_ADDRESS
	if (fault_num == INT_ITLB_MISS) {
		pte_t *pte = lookup_address(address);

		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
			pr_crit("kernel tried to execute"
			       " non-executable page - exploit attempt?"
			       " (uid: %d)\n", current->uid);
	}
#endif
	if (address < PAGE_SIZE)
		pr_alert("Unable to handle kernel NULL pointer dereference\n");
	else
		pr_alert("Unable to handle kernel paging request\n");
	pr_alert(" at virtual address "REGFMT", pc "REGFMT"\n",
		 address, regs->pc);

	show_regs(regs);

	if (unlikely(tsk->pid < 2)) {
		panic("Kernel page fault running %s!",
		      tsk->pid ? "init" : "the idle task");
	}

	/*
	 * More FIXME: we should probably copy the i386 here and
	 * implement a generic die() routine.  Not today.
	 */
#ifdef SUPPORT_DIE
	die("Oops", regs);
#endif
	/*
	 * NOTE(review): this second bust_spinlocks(1) looks like it
	 * should be bust_spinlocks(0) to undo the earlier call (as on
	 * other architectures) — confirm before changing.
	 */
	bust_spinlocks(1);

	do_group_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	pr_alert("VM: killing process %s\n", tsk->comm);
	if (!is_kernel_mode)
		do_group_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (is_kernel_mode)
		goto no_context;

	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, fault_num, tsk);
	return 0;
}
554 | |||
555 | #ifndef __tilegx__ | ||
556 | |||
/*
 * We must release ICS before panicking or we won't get anywhere:
 * clear SPR_INTERRUPT_CRITICAL_SECTION first, then call panic().
 */
#define ics_panic(fmt, ...) do { \
	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
	panic(fmt, __VA_ARGS__); \
} while (0)
562 | |||
/*
 * When we take an ITLB or DTLB fault or access violation in the
 * supervisor while the critical section bit is set, the hypervisor is
 * reluctant to write new values into the EX_CONTEXT_1_x registers,
 * since that might indicate we have not yet squirreled the SPR
 * contents away and can thus safely take a recursive interrupt.
 * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
 *
 * @info packs the faulting PC (with the low bit masked off) and, in
 * bit 0, the write flag.  The returned intvec_state tells the caller
 * whether the fault was fully handled here (retval == 1) or whether
 * normal fault handling should continue (retval == 0).
 */
struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
				      unsigned long address,
				      unsigned long info)
{
	unsigned long pc = info & ~1;
	int write = info & 1;
	pgd_t *pgd = get_current_pgd();

	/* Retval is 1 at first since we will handle the fault fully. */
	struct intvec_state state = {
		do_page_fault, fault_num, address, write, 1
	};

	/* Validate that we are plausibly in the right routine. */
	if ((pc & 0x7) != 0 || pc < PAGE_OFFSET ||
	    (fault_num != INT_DTLB_MISS &&
	     fault_num != INT_DTLB_ACCESS)) {
		unsigned long old_pc = regs->pc;
		regs->pc = pc;
		ics_panic("Bad ICS page fault args:"
			  " old PC %#lx, fault %d/%d at %#lx\n",
			  old_pc, fault_num, write, address);
	}

	/* We might be faulting on a vmalloc page, so check that first. */
	if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0)
		return state;

	/*
	 * If we faulted with ICS set in sys_cmpxchg, we are providing
	 * a user syscall service that should generate a signal on
	 * fault.  We didn't set up a kernel stack on initial entry to
	 * sys_cmpxchg, but instead had one set up by the fault, which
	 * (because sys_cmpxchg never releases ICS) came to us via the
	 * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
	 * still referencing the original user code.  We release the
	 * atomic lock and rewrite pt_regs so that it appears that we
	 * came from user-space directly, and after we finish the
	 * fault we'll go back to user space and re-issue the swint.
	 * This way the backtrace information is correct if we need to
	 * emit a stack dump at any point while handling this.
	 *
	 * Must match register use in sys_cmpxchg().
	 */
	if (pc >= (unsigned long) sys_cmpxchg &&
	    pc < (unsigned long) __sys_cmpxchg_end) {
#ifdef CONFIG_SMP
		/* Don't unlock before we could have locked. */
		if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) {
			int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
			__atomic_fault_unlock(lock_ptr);
		}
#endif
		regs->sp = regs->regs[27];
	}

	/*
	 * We can also fault in the atomic assembly, in which
	 * case we use the exception table to do the first-level fixup.
	 * We may re-fixup again in the real fault handler if it
	 * turns out the faulting address is just bad, and not,
	 * for example, migrating.
	 */
	else if (pc >= (unsigned long) __start_atomic_asm_code &&
		   pc < (unsigned long) __end_atomic_asm_code) {
		const struct exception_table_entry *fixup;
#ifdef CONFIG_SMP
		/* Unlock the atomic lock. */
		int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]);
		__atomic_fault_unlock(lock_ptr);
#endif
		fixup = search_exception_tables(pc);
		if (!fixup)
			ics_panic("ICS atomic fault not in table:"
				  " PC %#lx, fault %d", pc, fault_num);
		regs->pc = fixup->fixup;
		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
	}

	/*
	 * NOTE: the one other type of access that might bring us here
	 * are the memory ops in __tns_atomic_acquire/__tns_atomic_release,
	 * but we don't have to check specially for them since we can
	 * always safely return to the address of the fault and retry,
	 * since no separate atomic locks are involved.
	 */

	/*
	 * Now that we have released the atomic lock (if necessary),
	 * it's safe to spin if the PTE that caused the fault was migrating.
	 */
	if (fault_num == INT_DTLB_ACCESS)
		write = 1;
	if (handle_migrating_pte(pgd, fault_num, address, 1, write))
		return state;

	/* Return zero so that we continue on with normal fault handling. */
	state.retval = 0;
	return state;
}
671 | |||
672 | #endif /* !__tilegx__ */ | ||
673 | |||
674 | /* | ||
675 | * This routine handles page faults. It determines the address, and the | ||
676 | * problem, and then passes it handle_page_fault() for normal DTLB and | ||
677 | * ITLB issues, and for DMA or SN processor faults when we are in user | ||
678 | * space. For the latter, if we're in kernel mode, we just save the | ||
679 | * interrupt away appropriately and return immediately. We can't do | ||
680 | * page faults for user code while in kernel mode. | ||
681 | */ | ||
682 | void do_page_fault(struct pt_regs *regs, int fault_num, | ||
683 | unsigned long address, unsigned long write) | ||
684 | { | ||
685 | int is_page_fault; | ||
686 | |||
687 | /* This case should have been handled by do_page_fault_ics(). */ | ||
688 | BUG_ON(write & ~1); | ||
689 | |||
690 | #if CHIP_HAS_TILE_DMA() | ||
691 | /* | ||
692 | * If it's a DMA fault, suspend the transfer while we're | ||
693 | * handling the miss; we'll restart after it's handled. If we | ||
694 | * don't suspend, it's possible that this process could swap | ||
695 | * out and back in, and restart the engine since the DMA is | ||
696 | * still 'running'. | ||
697 | */ | ||
698 | if (fault_num == INT_DMATLB_MISS || | ||
699 | fault_num == INT_DMATLB_ACCESS || | ||
700 | fault_num == INT_DMATLB_MISS_DWNCL || | ||
701 | fault_num == INT_DMATLB_ACCESS_DWNCL) { | ||
702 | __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK); | ||
703 | while (__insn_mfspr(SPR_DMA_USER_STATUS) & | ||
704 | SPR_DMA_STATUS__BUSY_MASK) | ||
705 | ; | ||
706 | } | ||
707 | #endif | ||
708 | |||
709 | /* Validate fault num and decide if this is a first-time page fault. */ | ||
710 | switch (fault_num) { | ||
711 | case INT_ITLB_MISS: | ||
712 | case INT_DTLB_MISS: | ||
713 | #if CHIP_HAS_TILE_DMA() | ||
714 | case INT_DMATLB_MISS: | ||
715 | case INT_DMATLB_MISS_DWNCL: | ||
716 | #endif | ||
717 | #if CHIP_HAS_SN_PROC() | ||
718 | case INT_SNITLB_MISS: | ||
719 | case INT_SNITLB_MISS_DWNCL: | ||
720 | #endif | ||
721 | is_page_fault = 1; | ||
722 | break; | ||
723 | |||
724 | case INT_DTLB_ACCESS: | ||
725 | #if CHIP_HAS_TILE_DMA() | ||
726 | case INT_DMATLB_ACCESS: | ||
727 | case INT_DMATLB_ACCESS_DWNCL: | ||
728 | #endif | ||
729 | is_page_fault = 0; | ||
730 | break; | ||
731 | |||
732 | default: | ||
733 | panic("Bad fault number %d in do_page_fault", fault_num); | ||
734 | } | ||
735 | |||
736 | if (EX1_PL(regs->ex1) != USER_PL) { | ||
737 | struct async_tlb *async; | ||
738 | switch (fault_num) { | ||
739 | #if CHIP_HAS_TILE_DMA() | ||
740 | case INT_DMATLB_MISS: | ||
741 | case INT_DMATLB_ACCESS: | ||
742 | case INT_DMATLB_MISS_DWNCL: | ||
743 | case INT_DMATLB_ACCESS_DWNCL: | ||
744 | async = ¤t->thread.dma_async_tlb; | ||
745 | break; | ||
746 | #endif | ||
747 | #if CHIP_HAS_SN_PROC() | ||
748 | case INT_SNITLB_MISS: | ||
749 | case INT_SNITLB_MISS_DWNCL: | ||
750 | async = ¤t->thread.sn_async_tlb; | ||
751 | break; | ||
752 | #endif | ||
753 | default: | ||
754 | async = NULL; | ||
755 | } | ||
756 | if (async) { | ||
757 | |||
758 | /* | ||
759 | * No vmalloc check required, so we can allow | ||
760 | * interrupts immediately at this point. | ||
761 | */ | ||
762 | local_irq_enable(); | ||
763 | |||
764 | set_thread_flag(TIF_ASYNC_TLB); | ||
765 | if (async->fault_num != 0) { | ||
766 | panic("Second async fault %d;" | ||
767 | " old fault was %d (%#lx/%ld)", | ||
768 | fault_num, async->fault_num, | ||
769 | address, write); | ||
770 | } | ||
771 | BUG_ON(fault_num == 0); | ||
772 | async->fault_num = fault_num; | ||
773 | async->is_fault = is_page_fault; | ||
774 | async->is_write = write; | ||
775 | async->address = address; | ||
776 | return; | ||
777 | } | ||
778 | } | ||
779 | |||
780 | handle_page_fault(regs, fault_num, is_page_fault, address, write); | ||
781 | } | ||
782 | |||
783 | |||
784 | #if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() | ||
785 | /* | ||
786 | * Check an async_tlb structure to see if a deferred fault is waiting, | ||
787 | * and if so pass it to the page-fault code. | ||
788 | */ | ||
789 | static void handle_async_page_fault(struct pt_regs *regs, | ||
790 | struct async_tlb *async) | ||
791 | { | ||
792 | if (async->fault_num) { | ||
793 | /* | ||
794 | * Clear async->fault_num before calling the page-fault | ||
795 | * handler so that if we re-interrupt before returning | ||
796 | * from the function we have somewhere to put the | ||
797 | * information from the new interrupt. | ||
798 | */ | ||
799 | int fault_num = async->fault_num; | ||
800 | async->fault_num = 0; | ||
801 | handle_page_fault(regs, fault_num, async->is_fault, | ||
802 | async->address, async->is_write); | ||
803 | } | ||
804 | } | ||
805 | #endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */ | ||
806 | |||
807 | |||
808 | /* | ||
809 | * This routine effectively re-issues asynchronous page faults | ||
810 | * when we are returning to user space. | ||
811 | */ | ||
812 | void do_async_page_fault(struct pt_regs *regs) | ||
813 | { | ||
814 | /* | ||
815 | * Clear thread flag early. If we re-interrupt while processing | ||
816 | * code here, we will reset it and recall this routine before | ||
817 | * returning to user space. | ||
818 | */ | ||
819 | clear_thread_flag(TIF_ASYNC_TLB); | ||
820 | |||
821 | #if CHIP_HAS_TILE_DMA() | ||
822 | handle_async_page_fault(regs, ¤t->thread.dma_async_tlb); | ||
823 | #endif | ||
824 | #if CHIP_HAS_SN_PROC() | ||
825 | handle_async_page_fault(regs, ¤t->thread.sn_async_tlb); | ||
826 | #endif | ||
827 | } | ||
828 | |||
/*
 * Sync the kernel (vmalloc-region) portion of every process pgd with
 * the reference page table, so later kernel faults on those addresses
 * can be resolved without taking locks.
 */
void vmalloc_sync_all(void)
{
#ifdef __tilegx__
	/* Currently all L1 kernel pmd's are static and shared. */
	BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START));
#else
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * if undone).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = PAGE_OFFSET;
	unsigned long address;

	BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK);
	for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct list_head *pos;

			/* Walk every pgd in the system under the lock. */
			spin_lock_irqsave(&pgd_lock, flags);
			list_for_each(pos, &pgd_list)
				if (!vmalloc_sync_one(list_to_pgd(pos),
								address)) {
					/* Must be at first entry in list. */
					BUG_ON(pos != pgd_list.next);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			/* Only mark synced if we completed the whole list. */
			if (pos != pgd_list.next)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
#endif
}
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c new file mode 100644 index 000000000000..ff1cdff5114d --- /dev/null +++ b/arch/tile/mm/highmem.c | |||
@@ -0,0 +1,328 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/highmem.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include <asm/homecache.h> | ||
19 | |||
/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) to locate
 * the PTE mapping the given kernel virtual address.
 */
#define kmap_get_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
		(vaddr)), (vaddr))
23 | |||
24 | |||
/*
 * Map a highmem page into the kernel's permanent kmap area and return
 * its kernel virtual address.  May sleep (see might_sleep()); lowmem
 * pages are returned directly via page_address().
 */
void *kmap(struct page *page)
{
	void *kva;
	unsigned long flags;
	pte_t *ptep;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	kva = kmap_high(page);

	/*
	 * Rewrite the PTE under the lock.  This ensures that the page
	 * is not currently migrating.
	 */
	ptep = kmap_get_pte((unsigned long)kva);
	flags = homecache_kpte_lock();
	set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page)));
	homecache_kpte_unlock(flags);

	return kva;
}
EXPORT_SYMBOL(kmap);
48 | |||
/*
 * Undo a kmap(): release a page from the permanent kmap area.
 * Must not be called from interrupt context; lowmem pages need no work.
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
58 | |||
/*
 * Sanity-check that the km_type used for an atomic kmap matches the
 * current context (hardirq / softirq / process) and IRQ-disable state.
 * Only active under CONFIG_DEBUG_HIGHMEM; warnings are limited to the
 * first ten occurrences.
 */
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			/* Hardirq context: only IRQ-safe types allowed. */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ &&
			    /* type != KM_BIO_DST_IRQ && */
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	/* Conversely, IRQ types require interrupts to be disabled. */
	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}
102 | |||
/*
 * Describe a single atomic mapping of a page on a given cpu at a
 * given address, and allow it to be linked into a list.
 */
struct atomic_mapped_page {
	struct list_head list;	/* linkage for the global list */
	struct page *page;	/* page currently kmap_atomic'ed */
	int cpu;		/* cpu holding this atomic mapping */
	unsigned long va;	/* kernel VA at which it is mapped */
};
113 | |||
114 | static spinlock_t amp_lock = __SPIN_LOCK_UNLOCKED(&_lock); | ||
115 | static struct list_head amp_list = LIST_HEAD_INIT(amp_list); | ||
116 | |||
/*
 * Combining this structure with a per-cpu declaration lets us give
 * each cpu an atomic_mapped_page structure per type.
 */
struct kmap_amps {
	/* One slot per kmap type (KM_TYPE_NR of them). */
	struct atomic_mapped_page per_type[KM_TYPE_NR];
};
/* Per-cpu bookkeeping of currently-active atomic kmaps. */
static DEFINE_PER_CPU(struct kmap_amps, amps);
125 | |||
126 | /* | ||
127 | * Add a page and va, on this cpu, to the list of kmap_atomic pages, | ||
128 | * and write the new pte to memory. Writing the new PTE under the | ||
129 | * lock guarantees that it is either on the list before migration starts | ||
130 | * (if we won the race), or set_pte() sets the migrating bit in the PTE | ||
131 | * (if we lost the race). And doing it under the lock guarantees | ||
132 | * that when kmap_atomic_fix_one_pte() comes along, it finds a valid | ||
133 | * PTE in memory, iff the mapping is still on the amp_list. | ||
134 | * | ||
135 | * Finally, doing it under the lock lets us safely examine the page | ||
136 | * to see if it is immutable or not, for the generic kmap_atomic() case. | ||
137 | * If we examine it earlier we are exposed to a race where it looks | ||
138 | * writable earlier, but becomes immutable before we write the PTE. | ||
139 | */ | ||
140 | static void kmap_atomic_register(struct page *page, enum km_type type, | ||
141 | unsigned long va, pte_t *ptep, pte_t pteval) | ||
142 | { | ||
143 | unsigned long flags; | ||
144 | struct atomic_mapped_page *amp; | ||
145 | |||
146 | flags = homecache_kpte_lock(); | ||
147 | spin_lock(&_lock); | ||
148 | |||
149 | /* With interrupts disabled, now fill in the per-cpu info. */ | ||
150 | amp = &__get_cpu_var(amps).per_type[type]; | ||
151 | amp->page = page; | ||
152 | amp->cpu = smp_processor_id(); | ||
153 | amp->va = va; | ||
154 | |||
155 | /* For generic kmap_atomic(), choose the PTE writability now. */ | ||
156 | if (!pte_read(pteval)) | ||
157 | pteval = mk_pte(page, page_to_kpgprot(page)); | ||
158 | |||
159 | list_add(&->list, &_list); | ||
160 | set_pte(ptep, pteval); | ||
161 | arch_flush_lazy_mmu_mode(); | ||
162 | |||
163 | spin_unlock(&_lock); | ||
164 | homecache_kpte_unlock(flags); | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Remove a page and va, on this cpu, from the list of kmap_atomic pages. | ||
169 | * Linear-time search, but we count on the lists being short. | ||
170 | * We don't need to adjust the PTE under the lock (as opposed to the | ||
171 | * kmap_atomic_register() case), since we're just unconditionally | ||
172 | * zeroing the PTE after it's off the list. | ||
173 | */ | ||
174 | static void kmap_atomic_unregister(struct page *page, unsigned long va) | ||
175 | { | ||
176 | unsigned long flags; | ||
177 | struct atomic_mapped_page *amp; | ||
178 | int cpu = smp_processor_id(); | ||
179 | spin_lock_irqsave(&_lock, flags); | ||
180 | list_for_each_entry(amp, &_list, list) { | ||
181 | if (amp->page == page && amp->cpu == cpu && amp->va == va) | ||
182 | break; | ||
183 | } | ||
184 | BUG_ON(&->list == &_list); | ||
185 | list_del(&->list); | ||
186 | spin_unlock_irqrestore(&_lock, flags); | ||
187 | } | ||
188 | |||
/* Helper routine for kmap_atomic_fix_kpte(), below. */
static void kmap_atomic_fix_one_kpte(struct atomic_mapped_page *amp,
				     int finished)
{
	pte_t *ptep = kmap_get_pte(amp->va);
	if (!finished) {
		/* Mark migrating and flush the mapping on the owning cpu. */
		set_pte(ptep, pte_mkmigrate(*ptep));
		flush_remote(0, 0, NULL, amp->va, PAGE_SIZE, PAGE_SIZE,
			     cpumask_of(amp->cpu), NULL, 0);
	} else {
		/*
		 * Rewrite a default kernel PTE for this page.
		 * We rely on the fact that set_pte() writes the
		 * present+migrating bits last.
		 */
		pte_t pte = mk_pte(amp->page, page_to_kpgprot(amp->page));
		set_pte(ptep, pte);
	}
}
208 | |||
209 | /* | ||
210 | * This routine is a helper function for homecache_fix_kpte(); see | ||
211 | * its comments for more information on the "finished" argument here. | ||
212 | * | ||
213 | * Note that we hold the lock while doing the remote flushes, which | ||
214 | * will stall any unrelated cpus trying to do kmap_atomic operations. | ||
215 | * We could just update the PTEs under the lock, and save away copies | ||
216 | * of the structs (or just the va+cpu), then flush them after we | ||
217 | * release the lock, but it seems easier just to do it all under the lock. | ||
218 | */ | ||
219 | void kmap_atomic_fix_kpte(struct page *page, int finished) | ||
220 | { | ||
221 | struct atomic_mapped_page *amp; | ||
222 | unsigned long flags; | ||
223 | spin_lock_irqsave(&_lock, flags); | ||
224 | list_for_each_entry(amp, &_list, list) { | ||
225 | if (amp->page == page) | ||
226 | kmap_atomic_fix_one_kpte(amp, finished); | ||
227 | } | ||
228 | spin_unlock_irqrestore(&_lock, flags); | ||
229 | } | ||
230 | |||
231 | /* | ||
232 | * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap | ||
233 | * because the kmap code must perform a global TLB invalidation when | ||
234 | * the kmap pool wraps. | ||
235 | * | ||
236 | * Note that they may be slower than on x86 (etc.) because unlike on | ||
237 | * those platforms, we do have to take a global lock to map and unmap | ||
238 | * pages on Tile (see above). | ||
239 | * | ||
240 | * When holding an atomic kmap is is not legal to sleep, so atomic | ||
241 | * kmaps are appropriate for short, tight code paths only. | ||
242 | */ | ||
243 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) | ||
244 | { | ||
245 | enum fixed_addresses idx; | ||
246 | unsigned long vaddr; | ||
247 | pte_t *pte; | ||
248 | |||
249 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ | ||
250 | pagefault_disable(); | ||
251 | |||
252 | /* Avoid icache flushes by disallowing atomic executable mappings. */ | ||
253 | BUG_ON(pte_exec(prot)); | ||
254 | |||
255 | if (!PageHighMem(page)) | ||
256 | return page_address(page); | ||
257 | |||
258 | debug_kmap_atomic_prot(type); | ||
259 | |||
260 | idx = type + KM_TYPE_NR*smp_processor_id(); | ||
261 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
262 | pte = kmap_get_pte(vaddr); | ||
263 | BUG_ON(!pte_none(*pte)); | ||
264 | |||
265 | /* Register that this page is mapped atomically on this cpu. */ | ||
266 | kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot)); | ||
267 | |||
268 | return (void *)vaddr; | ||
269 | } | ||
270 | EXPORT_SYMBOL(kmap_atomic_prot); | ||
271 | |||
272 | void *kmap_atomic(struct page *page, enum km_type type) | ||
273 | { | ||
274 | /* PAGE_NONE is a magic value that tells us to check immutability. */ | ||
275 | return kmap_atomic_prot(page, type, PAGE_NONE); | ||
276 | } | ||
277 | EXPORT_SYMBOL(kmap_atomic); | ||
278 | |||
/* Tear down an atomic kmap established by kmap_atomic()/kmap_atomic_prot(). */
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte without
	 * first remapping it. Keeping stale mappings around is a bad idea.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
		pte_t *pte = kmap_get_pte(vaddr);
		pte_t pteval = *pte;
		/* The slot must hold a present (or currently migrating) PTE. */
		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
		kmap_atomic_unregister(pte_page(pteval), vaddr);
		kpte_clear_flush(pte, vaddr);
	} else {
		/* Must be a lowmem page */
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);
304 | |||
305 | /* | ||
306 | * This API is supposed to allow us to map memory without a "struct page". | ||
307 | * Currently we don't support this, though this may change in the future. | ||
308 | */ | ||
309 | void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) | ||
310 | { | ||
311 | return kmap_atomic(pfn_to_page(pfn), type); | ||
312 | } | ||
313 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | ||
314 | { | ||
315 | return kmap_atomic_prot(pfn_to_page(pfn), type, prot); | ||
316 | } | ||
317 | |||
318 | struct page *kmap_atomic_to_page(void *ptr) | ||
319 | { | ||
320 | pte_t *pte; | ||
321 | unsigned long vaddr = (unsigned long)ptr; | ||
322 | |||
323 | if (vaddr < FIXADDR_START) | ||
324 | return virt_to_page(ptr); | ||
325 | |||
326 | pte = kmap_get_pte(vaddr); | ||
327 | return pte_page(*pte); | ||
328 | } | ||
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c new file mode 100644 index 000000000000..97c478e7be27 --- /dev/null +++ b/arch/tile/mm/homecache.c | |||
@@ -0,0 +1,433 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This code maintains the "home" for each page in the system. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/bootmem.h> | ||
22 | #include <linux/rmap.h> | ||
23 | #include <linux/pagemap.h> | ||
24 | #include <linux/mutex.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/sysctl.h> | ||
27 | #include <linux/pagevec.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/timex.h> | ||
30 | #include <linux/cache.h> | ||
31 | #include <linux/smp.h> | ||
32 | |||
33 | #include <asm/page.h> | ||
34 | #include <asm/sections.h> | ||
35 | #include <asm/tlbflush.h> | ||
36 | #include <asm/pgalloc.h> | ||
37 | #include <asm/homecache.h> | ||
38 | |||
39 | #include "migrate.h" | ||
40 | |||
41 | |||
#if CHIP_HAS_COHERENT_LOCAL_CACHE()

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home. There's no point in using it if we
 * don't have coherent local caching, though.
 */
static int __write_once noallocl2;
/* Boot-time "noallocl2" flag parser; returns 0 to report success. */
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);

#else

/* Without coherent local caching the option is meaningless: hardwire off. */
#define noallocl2 0

#endif
62 | |||
/* Provide no-op versions of these routines to keep flush_remote() cleaner. */
/* NOTE(review): stubs only; flush_remote() invokes them around hv_flush_remote(). */
#define mark_caches_evicted_start() 0
#define mark_caches_evicted_finish(mask, timestamp) do {} while (0)
67 | |||
68 | /* | ||
69 | * Update the irq_stat for cpus that we are going to interrupt | ||
70 | * with TLB or cache flushes. Also handle removing dataplane cpus | ||
71 | * from the TLB flush set, and setting dataplane_tlb_state instead. | ||
72 | */ | ||
73 | static void hv_flush_update(const struct cpumask *cache_cpumask, | ||
74 | struct cpumask *tlb_cpumask, | ||
75 | unsigned long tlb_va, unsigned long tlb_length, | ||
76 | HV_Remote_ASID *asids, int asidcount) | ||
77 | { | ||
78 | struct cpumask mask; | ||
79 | int i, cpu; | ||
80 | |||
81 | cpumask_clear(&mask); | ||
82 | if (cache_cpumask) | ||
83 | cpumask_or(&mask, &mask, cache_cpumask); | ||
84 | if (tlb_cpumask && tlb_length) { | ||
85 | cpumask_or(&mask, &mask, tlb_cpumask); | ||
86 | } | ||
87 | |||
88 | for (i = 0; i < asidcount; ++i) | ||
89 | cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask); | ||
90 | |||
91 | /* | ||
92 | * Don't bother to update atomically; losing a count | ||
93 | * here is not that critical. | ||
94 | */ | ||
95 | for_each_cpu(cpu, &mask) | ||
96 | ++per_cpu(irq_stat, cpu).irq_hv_flush_count; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * This wrapper function around hv_flush_remote() does several things: | ||
101 | * | ||
102 | * - Provides a return value error-checking panic path, since | ||
103 | * there's never any good reason for hv_flush_remote() to fail. | ||
104 | * - Accepts a 32-bit PFN rather than a 64-bit PA, which generally | ||
105 | * is the type that Linux wants to pass around anyway. | ||
106 | * - Centralizes the mark_caches_evicted() handling. | ||
107 | * - Canonicalizes that lengths of zero make cpumasks NULL. | ||
108 | * - Handles deferring TLB flushes for dataplane tiles. | ||
109 | * - Tracks remote interrupts in the per-cpu irq_cpustat_t. | ||
110 | * | ||
111 | * Note that we have to wait until the cache flush completes before | ||
112 | * updating the per-cpu last_cache_flush word, since otherwise another | ||
113 | * concurrent flush can race, conclude the flush has already | ||
114 | * completed, and start to use the page while it's still dirty | ||
115 | * remotely (running concurrently with the actual evict, presumably). | ||
116 | */ | ||
117 | void flush_remote(unsigned long cache_pfn, unsigned long cache_control, | ||
118 | const struct cpumask *cache_cpumask_orig, | ||
119 | HV_VirtAddr tlb_va, unsigned long tlb_length, | ||
120 | unsigned long tlb_pgsize, | ||
121 | const struct cpumask *tlb_cpumask_orig, | ||
122 | HV_Remote_ASID *asids, int asidcount) | ||
123 | { | ||
124 | int rc; | ||
125 | int timestamp = 0; /* happy compiler */ | ||
126 | struct cpumask cache_cpumask_copy, tlb_cpumask_copy; | ||
127 | struct cpumask *cache_cpumask, *tlb_cpumask; | ||
128 | HV_PhysAddr cache_pa; | ||
129 | char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5]; | ||
130 | |||
131 | mb(); /* provided just to simplify "magic hypervisor" mode */ | ||
132 | |||
133 | /* | ||
134 | * Canonicalize and copy the cpumasks. | ||
135 | */ | ||
136 | if (cache_cpumask_orig && cache_control) { | ||
137 | cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig); | ||
138 | cache_cpumask = &cache_cpumask_copy; | ||
139 | } else { | ||
140 | cpumask_clear(&cache_cpumask_copy); | ||
141 | cache_cpumask = NULL; | ||
142 | } | ||
143 | if (cache_cpumask == NULL) | ||
144 | cache_control = 0; | ||
145 | if (tlb_cpumask_orig && tlb_length) { | ||
146 | cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig); | ||
147 | tlb_cpumask = &tlb_cpumask_copy; | ||
148 | } else { | ||
149 | cpumask_clear(&tlb_cpumask_copy); | ||
150 | tlb_cpumask = NULL; | ||
151 | } | ||
152 | |||
153 | hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length, | ||
154 | asids, asidcount); | ||
155 | cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT; | ||
156 | if (cache_control & HV_FLUSH_EVICT_L2) | ||
157 | timestamp = mark_caches_evicted_start(); | ||
158 | rc = hv_flush_remote(cache_pa, cache_control, | ||
159 | cpumask_bits(cache_cpumask), | ||
160 | tlb_va, tlb_length, tlb_pgsize, | ||
161 | cpumask_bits(tlb_cpumask), | ||
162 | asids, asidcount); | ||
163 | if (cache_control & HV_FLUSH_EVICT_L2) | ||
164 | mark_caches_evicted_finish(cache_cpumask, timestamp); | ||
165 | if (rc == 0) | ||
166 | return; | ||
167 | cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); | ||
168 | cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy); | ||
169 | |||
170 | pr_err("hv_flush_remote(%#llx, %#lx, %p [%s]," | ||
171 | " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n", | ||
172 | cache_pa, cache_control, cache_cpumask, cache_buf, | ||
173 | (unsigned long)tlb_va, tlb_length, tlb_pgsize, | ||
174 | tlb_cpumask, tlb_buf, | ||
175 | asids, asidcount, rc); | ||
176 | panic("Unsafe to continue."); | ||
177 | } | ||
178 | |||
/*
 * Evict the whole L2 cache on the given cpus; flushes no TLB entries
 * (zero length) and passes no remote ASIDs.
 */
void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}
183 | |||
184 | /* Return a mask of the cpus whose caches currently own these pages. */ | ||
185 | static void homecache_mask(struct page *page, int pages, | ||
186 | struct cpumask *home_mask) | ||
187 | { | ||
188 | int i; | ||
189 | cpumask_clear(home_mask); | ||
190 | for (i = 0; i < pages; ++i) { | ||
191 | int home = page_home(&page[i]); | ||
192 | if (home == PAGE_HOME_IMMUTABLE || | ||
193 | home == PAGE_HOME_INCOHERENT) { | ||
194 | cpumask_copy(home_mask, cpu_possible_mask); | ||
195 | return; | ||
196 | } | ||
197 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
198 | if (home == PAGE_HOME_HASH) { | ||
199 | cpumask_or(home_mask, home_mask, &hash_for_home_map); | ||
200 | continue; | ||
201 | } | ||
202 | #endif | ||
203 | if (home == PAGE_HOME_UNCACHED) | ||
204 | continue; | ||
205 | BUG_ON(home < 0 || home >= NR_CPUS); | ||
206 | cpumask_set_cpu(home, home_mask); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * Return the passed length, or zero if it's long enough that we | ||
212 | * believe we should evict the whole L2 cache. | ||
213 | */ | ||
214 | static unsigned long cache_flush_length(unsigned long length) | ||
215 | { | ||
216 | return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length; | ||
217 | } | ||
218 | |||
/* On the simulator, confirm lines have been evicted everywhere. */
static void validate_lines_evicted(unsigned long pfn, size_t length)
{
	/* NOTE(review): presumably a no-op outside the simulator — confirm. */
	sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED,
		    (HV_PhysAddr)pfn << PAGE_SHIFT, length);
}
225 | |||
226 | /* Flush a page out of whatever cache(s) it is in. */ | ||
227 | void homecache_flush_cache(struct page *page, int order) | ||
228 | { | ||
229 | int pages = 1 << order; | ||
230 | int length = cache_flush_length(pages * PAGE_SIZE); | ||
231 | unsigned long pfn = page_to_pfn(page); | ||
232 | struct cpumask home_mask; | ||
233 | |||
234 | homecache_mask(page, pages, &home_mask); | ||
235 | flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0); | ||
236 | validate_lines_evicted(pfn, pages * PAGE_SIZE); | ||
237 | } | ||
238 | |||
239 | |||
/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	/* The NC (non-coherent) bit marks immutable mappings. */
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		/* Homed in a single remote tile's L3. */
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
#if CHIP_HAS_CBOX_HOME_MAP()
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
#endif
	}
	panic("Bad PTE %#llx\n", pte.val);
}
259 | |||
/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for non-linear file mapping "PTEs" and pass them through. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif


	/*
	 * Only immutable pages get NC mappings. If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath. In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	/* Encode the requested home into the PTE's caching mode. */
	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else
#if CHIP_HAS_CBOX_HOME_MAP()
		if (hash_default)
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		else
#endif
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		/* Mark the mapping non-coherent (immutable). */
		pte = hv_pte_set_nc(pte);
		break;

#if CHIP_HAS_CBOX_HOME_MAP()
	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;
#endif

	default:
		/* Any other home must be a valid single-cpu home. */
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}
#endif

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
351 | |||
352 | /* | ||
353 | * The routines in this section are the "static" versions of the normal | ||
354 | * dynamic homecaching routines; they just set the home cache | ||
355 | * of a kernel page once, and require a full-chip cache/TLB flush, | ||
356 | * so they're not suitable for anything but infrequent use. | ||
357 | */ | ||
358 | |||
359 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
360 | static inline int initial_page_home(void) { return PAGE_HOME_HASH; } | ||
361 | #else | ||
362 | static inline int initial_page_home(void) { return 0; } | ||
363 | #endif | ||
364 | |||
365 | int page_home(struct page *page) | ||
366 | { | ||
367 | if (PageHighMem(page)) { | ||
368 | return initial_page_home(); | ||
369 | } else { | ||
370 | unsigned long kva = (unsigned long)page_address(page); | ||
371 | return pte_to_home(*virt_to_pte(NULL, kva)); | ||
372 | } | ||
373 | } | ||
374 | |||
/*
 * Rehome a run of lowmem pages: evict any cached copies and flush
 * stale TLB entries chip-wide, then rewrite the kernel PTEs with the
 * new home.  The pages must be lowmem, unmapped, and not shared
 * (count <= 1), per the BUG_ON checks below.
 */
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	/* Evict cached lines everywhere and flush the kernel TLB range. */
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		*ptep = pte_set_home(pteval, home);
	}
}
395 | |||
396 | struct page *homecache_alloc_pages(gfp_t gfp_mask, | ||
397 | unsigned int order, int home) | ||
398 | { | ||
399 | struct page *page; | ||
400 | BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */ | ||
401 | page = alloc_pages(gfp_mask, order); | ||
402 | if (page) | ||
403 | homecache_change_page_home(page, order, home); | ||
404 | return page; | ||
405 | } | ||
406 | |||
407 | struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, | ||
408 | unsigned int order, int home) | ||
409 | { | ||
410 | struct page *page; | ||
411 | BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */ | ||
412 | page = alloc_pages_node(nid, gfp_mask, order); | ||
413 | if (page) | ||
414 | homecache_change_page_home(page, order, home); | ||
415 | return page; | ||
416 | } | ||
417 | |||
418 | void homecache_free_pages(unsigned long addr, unsigned int order) | ||
419 | { | ||
420 | struct page *page; | ||
421 | |||
422 | if (addr == 0) | ||
423 | return; | ||
424 | |||
425 | VM_BUG_ON(!virt_addr_valid((void *)addr)); | ||
426 | page = virt_to_page((void *)addr); | ||
427 | if (put_page_testzero(page)) { | ||
428 | int pages = (1 << order); | ||
429 | homecache_change_page_home(page, order, initial_page_home()); | ||
430 | while (pages--) | ||
431 | __free_page(page++); | ||
432 | } | ||
433 | } | ||
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c new file mode 100644 index 000000000000..24688b697a8d --- /dev/null +++ b/arch/tile/mm/hugetlbpage.c | |||
@@ -0,0 +1,343 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * TILE Huge TLB Page Support for Kernel. | ||
15 | * Taken from i386 hugetlb implementation: | ||
16 | * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/hugetlb.h> | ||
23 | #include <linux/pagemap.h> | ||
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <linux/sysctl.h> | ||
28 | #include <linux/mman.h> | ||
29 | #include <asm/tlb.h> | ||
30 | #include <asm/tlbflush.h> | ||
31 | |||
32 | pte_t *huge_pte_alloc(struct mm_struct *mm, | ||
33 | unsigned long addr, unsigned long sz) | ||
34 | { | ||
35 | pgd_t *pgd; | ||
36 | pud_t *pud; | ||
37 | pte_t *pte = NULL; | ||
38 | |||
39 | /* We do not yet support multiple huge page sizes. */ | ||
40 | BUG_ON(sz != PMD_SIZE); | ||
41 | |||
42 | pgd = pgd_offset(mm, addr); | ||
43 | pud = pud_alloc(mm, pgd, addr); | ||
44 | if (pud) | ||
45 | pte = (pte_t *) pmd_alloc(mm, pud, addr); | ||
46 | BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); | ||
47 | |||
48 | return pte; | ||
49 | } | ||
50 | |||
51 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
52 | { | ||
53 | pgd_t *pgd; | ||
54 | pud_t *pud; | ||
55 | pmd_t *pmd = NULL; | ||
56 | |||
57 | pgd = pgd_offset(mm, addr); | ||
58 | if (pgd_present(*pgd)) { | ||
59 | pud = pud_offset(pgd, addr); | ||
60 | if (pud_present(*pud)) | ||
61 | pmd = pmd_offset(pud, addr); | ||
62 | } | ||
63 | return (pte_t *) pmd; | ||
64 | } | ||
65 | |||
66 | #ifdef HUGETLB_TEST | ||
67 | struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, | ||
68 | int write) | ||
69 | { | ||
70 | unsigned long start = address; | ||
71 | int length = 1; | ||
72 | int nr; | ||
73 | struct page *page; | ||
74 | struct vm_area_struct *vma; | ||
75 | |||
76 | vma = find_vma(mm, addr); | ||
77 | if (!vma || !is_vm_hugetlb_page(vma)) | ||
78 | return ERR_PTR(-EINVAL); | ||
79 | |||
80 | pte = huge_pte_offset(mm, address); | ||
81 | |||
82 | /* hugetlb should be locked, and hence, prefaulted */ | ||
83 | WARN_ON(!pte || pte_none(*pte)); | ||
84 | |||
85 | page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)]; | ||
86 | |||
87 | WARN_ON(!PageHead(page)); | ||
88 | |||
89 | return page; | ||
90 | } | ||
91 | |||
/* Under HUGETLB_TEST, huge PMD/PUD detection and following are stubbed out. */
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}
107 | |||
108 | #else | ||
109 | |||
/* This arch does not support follow_huge_addr(); always report -EINVAL. */
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}
115 | |||
116 | int pmd_huge(pmd_t pmd) | ||
117 | { | ||
118 | return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE); | ||
119 | } | ||
120 | |||
121 | int pud_huge(pud_t pud) | ||
122 | { | ||
123 | return !!(pud_val(pud) & _PAGE_HUGE_PAGE); | ||
124 | } | ||
125 | |||
126 | struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, | ||
127 | pmd_t *pmd, int write) | ||
128 | { | ||
129 | struct page *page; | ||
130 | |||
131 | page = pte_page(*(pte_t *)pmd); | ||
132 | if (page) | ||
133 | page += ((address & ~PMD_MASK) >> PAGE_SHIFT); | ||
134 | return page; | ||
135 | } | ||
136 | |||
137 | struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, | ||
138 | pud_t *pud, int write) | ||
139 | { | ||
140 | struct page *page; | ||
141 | |||
142 | page = pte_page(*(pte_t *)pud); | ||
143 | if (page) | ||
144 | page += ((address & ~PUD_MASK) >> PAGE_SHIFT); | ||
145 | return page; | ||
146 | } | ||
147 | |||
/* Huge PMD sharing is not implemented on this arch; report "not unshared". */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
152 | |||
153 | #endif | ||
154 | |||
155 | #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
/*
 * Search bottom-up from TASK_UNMAPPED_BASE (or the cached hint) for a
 * free, huge-page-aligned hole of size "len".  Returns the address on
 * success or -ENOMEM if the whole range is exhausted.
 */
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	/* Use the cached free-area hint only if it might fit this request. */
	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, huge_page_size(h));

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/* Found a hole; remember where the next search starts. */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, huge_page_size(h));
	}
}
198 | |||
199 | static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, | ||
200 | unsigned long addr0, unsigned long len, | ||
201 | unsigned long pgoff, unsigned long flags) | ||
202 | { | ||
203 | struct hstate *h = hstate_file(file); | ||
204 | struct mm_struct *mm = current->mm; | ||
205 | struct vm_area_struct *vma, *prev_vma; | ||
206 | unsigned long base = mm->mmap_base, addr = addr0; | ||
207 | unsigned long largest_hole = mm->cached_hole_size; | ||
208 | int first_time = 1; | ||
209 | |||
210 | /* don't allow allocations above current base */ | ||
211 | if (mm->free_area_cache > base) | ||
212 | mm->free_area_cache = base; | ||
213 | |||
214 | if (len <= largest_hole) { | ||
215 | largest_hole = 0; | ||
216 | mm->free_area_cache = base; | ||
217 | } | ||
218 | try_again: | ||
219 | /* make sure it can fit in the remaining address space */ | ||
220 | if (mm->free_area_cache < len) | ||
221 | goto fail; | ||
222 | |||
223 | /* either no address requested or cant fit in requested address hole */ | ||
224 | addr = (mm->free_area_cache - len) & huge_page_mask(h); | ||
225 | do { | ||
226 | /* | ||
227 | * Lookup failure means no vma is above this address, | ||
228 | * i.e. return with success: | ||
229 | */ | ||
230 | vma = find_vma_prev(mm, addr, &prev_vma); | ||
231 | if (!vma) { | ||
232 | return addr; | ||
233 | break; | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | * new region fits between prev_vma->vm_end and | ||
238 | * vma->vm_start, use it: | ||
239 | */ | ||
240 | if (addr + len <= vma->vm_start && | ||
241 | (!prev_vma || (addr >= prev_vma->vm_end))) { | ||
242 | /* remember the address as a hint for next time */ | ||
243 | mm->cached_hole_size = largest_hole; | ||
244 | mm->free_area_cache = addr; | ||
245 | return addr; | ||
246 | } else { | ||
247 | /* pull free_area_cache down to the first hole */ | ||
248 | if (mm->free_area_cache == vma->vm_end) { | ||
249 | mm->free_area_cache = vma->vm_start; | ||
250 | mm->cached_hole_size = largest_hole; | ||
251 | } | ||
252 | } | ||
253 | |||
254 | /* remember the largest hole we saw so far */ | ||
255 | if (addr + largest_hole < vma->vm_start) | ||
256 | largest_hole = vma->vm_start - addr; | ||
257 | |||
258 | /* try just below the current vma->vm_start */ | ||
259 | addr = (vma->vm_start - len) & huge_page_mask(h); | ||
260 | |||
261 | } while (len <= vma->vm_start); | ||
262 | |||
263 | fail: | ||
264 | /* | ||
265 | * if hint left us with no space for the requested | ||
266 | * mapping then try again: | ||
267 | */ | ||
268 | if (first_time) { | ||
269 | mm->free_area_cache = base; | ||
270 | largest_hole = 0; | ||
271 | first_time = 0; | ||
272 | goto try_again; | ||
273 | } | ||
274 | /* | ||
275 | * A failed mmap() very likely causes application failure, | ||
276 | * so fall back to the bottom-up function here. This scenario | ||
277 | * can happen with large stack limits and large mmap() | ||
278 | * allocations. | ||
279 | */ | ||
280 | mm->free_area_cache = TASK_UNMAPPED_BASE; | ||
281 | mm->cached_hole_size = ~0UL; | ||
282 | addr = hugetlb_get_unmapped_area_bottomup(file, addr0, | ||
283 | len, pgoff, flags); | ||
284 | |||
285 | /* | ||
286 | * Restore the topdown base: | ||
287 | */ | ||
288 | mm->free_area_cache = base; | ||
289 | mm->cached_hole_size = ~0UL; | ||
290 | |||
291 | return addr; | ||
292 | } | ||
293 | |||
/*
 * Pick an unmapped, huge-page-aligned region of size "len".  Honors an
 * explicit MAP_FIXED request or a usable hint address, otherwise
 * delegates to the bottom-up or top-down searcher to match the mm's
 * normal mmap layout.
 */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	/* The length must be a whole number of huge pages. */
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* Try the caller's hint first, aligned up to a huge page. */
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
326 | |||
327 | static __init int setup_hugepagesz(char *opt) | ||
328 | { | ||
329 | unsigned long ps = memparse(opt, &opt); | ||
330 | if (ps == PMD_SIZE) { | ||
331 | hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); | ||
332 | } else if (ps == PUD_SIZE) { | ||
333 | hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); | ||
334 | } else { | ||
335 | pr_err("hugepagesz: Unsupported page size %lu M\n", | ||
336 | ps >> 20); | ||
337 | return 0; | ||
338 | } | ||
339 | return 1; | ||
340 | } | ||
341 | __setup("hugepagesz=", setup_hugepagesz); | ||
342 | |||
343 | #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ | ||
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c new file mode 100644 index 000000000000..d89c9eacd162 --- /dev/null +++ b/arch/tile/mm/init.c | |||
@@ -0,0 +1,1085 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1995 Linus Torvalds | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/signal.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/ptrace.h> | ||
24 | #include <linux/mman.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/hugetlb.h> | ||
27 | #include <linux/swap.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/highmem.h> | ||
31 | #include <linux/pagemap.h> | ||
32 | #include <linux/poison.h> | ||
33 | #include <linux/bootmem.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/proc_fs.h> | ||
36 | #include <linux/efi.h> | ||
37 | #include <linux/memory_hotplug.h> | ||
38 | #include <linux/uaccess.h> | ||
39 | #include <asm/mmu_context.h> | ||
40 | #include <asm/processor.h> | ||
41 | #include <asm/system.h> | ||
42 | #include <asm/pgtable.h> | ||
43 | #include <asm/pgalloc.h> | ||
44 | #include <asm/dma.h> | ||
45 | #include <asm/fixmap.h> | ||
46 | #include <asm/tlb.h> | ||
47 | #include <asm/tlbflush.h> | ||
48 | #include <asm/sections.h> | ||
49 | #include <asm/setup.h> | ||
50 | #include <asm/homecache.h> | ||
51 | #include <hv/hypervisor.h> | ||
52 | #include <arch/chip.h> | ||
53 | |||
54 | #include "migrate.h" | ||
55 | |||
56 | /* | ||
57 | * We could set FORCE_MAX_ZONEORDER to "(HPAGE_SHIFT - PAGE_SHIFT + 1)" | ||
58 | * in the Tile Kconfig, but this generates configure warnings. | ||
59 | * Do it here and force people to get it right to compile this file. | ||
60 | * The problem is that with 4KB small pages and 16MB huge pages, | ||
61 | * the default value doesn't allow us to group enough small pages | ||
62 | * together to make up a huge page. | ||
63 | */ | ||
64 | #if CONFIG_FORCE_MAX_ZONEORDER < HPAGE_SHIFT - PAGE_SHIFT + 1 | ||
65 | # error "Change FORCE_MAX_ZONEORDER in arch/tile/Kconfig to match page size" | ||
66 | #endif | ||
67 | |||
68 | #define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0)) | ||
69 | |||
70 | #ifndef __tilegx__ | ||
71 | unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE; | ||
72 | #endif | ||
73 | |||
74 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
75 | |||
/*
 * Create an L2 page table from bootmem.  L2 tables are allocated at
 * L2_KERNEL_PGTABLE_SIZE (rather than one per page -- see the comment
 * on l2_ptes[] below) but must keep the hypervisor's required alignment.
 */
static pte_t * __init alloc_pte(void)
{
	return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}
81 | |||
82 | /* | ||
83 | * L2 page tables per controller. We allocate these all at once from | ||
84 | * the bootmem allocator and store them here. This saves on kernel L2 | ||
85 | * page table memory, compared to allocating a full 64K page per L2 | ||
86 | * page table, and also means that in cases where we use huge pages, | ||
87 | * we are guaranteed to later be able to shatter those huge pages and | ||
88 | * switch to using these page tables instead, without requiring | ||
89 | * further allocation. Each l2_ptes[] entry points to the first page | ||
90 | * table for the first hugepage-size piece of memory on the | ||
91 | * controller; other page tables are just indexed directly, i.e. the | ||
92 | * L2 page tables are contiguous in memory for each controller. | ||
93 | */ | ||
94 | static pte_t *l2_ptes[MAX_NUMNODES]; | ||
95 | static int num_l2_ptes[MAX_NUMNODES]; | ||
96 | |||
/*
 * Preallocate all the L2 page tables for one controller (node) as a
 * single contiguous bootmem chunk.  @pages is the number of small pages
 * the node spans; it must be a whole number of L2 tables' worth of PTEs.
 */
static void init_prealloc_ptes(int node, int pages)
{
	BUG_ON(pages & (HV_L2_ENTRIES-1));
	if (pages) {
		num_l2_ptes[node] = pages;
		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
						HV_PAGE_TABLE_ALIGN, 0);
	}
}
106 | |||
/*
 * Return a pointer to the preallocated kernel PTE for the given pfn.
 * Works because each node's L2 tables are contiguous (see l2_ptes[]),
 * so the on-node pfn offset indexes the array directly.
 */
pte_t *get_prealloc_pte(unsigned long pfn)
{
	int node = pfn_to_nid(pfn);
	/* Strip the high "controller" PA bits to get the on-node offset. */
	pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT));
	BUG_ON(node >= MAX_NUMNODES);
	BUG_ON(pfn >= num_l2_ptes[node]);
	return &l2_ptes[node][pfn];
}
115 | |||
116 | /* | ||
117 | * What caching do we expect pages from the heap to have when | ||
118 | * they are allocated during bootup? (Once we've installed the | ||
119 | * "real" swapper_pg_dir.) | ||
120 | */ | ||
static int initial_heap_home(void)
{
#if CHIP_HAS_CBOX_HOME_MAP()
	/* Hash-for-home is the default heap home when the chip supports it. */
	if (hash_default)
		return PAGE_HOME_HASH;
#endif
	/* Otherwise home heap pages on the cpu doing the boot-time setup. */
	return smp_processor_id();
}
129 | |||
130 | /* | ||
131 | * Place a pointer to an L2 page table in a middle page | ||
132 | * directory entry. | ||
133 | */ | ||
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
	phys_addr_t pa = __pa(page_table);
	/* The hypervisor PTE stores the table's PA as a "page table fn". */
	unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
	pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);
	BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);
	pteval = pte_set_home(pteval, initial_heap_home());
	*(pte_t *)pmd = pteval;
	/* Sanity check: re-reading the pmd must yield the table we stored. */
	if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
		BUG();
}
145 | |||
146 | #ifdef __tilegx__ | ||
147 | |||
148 | #if HV_L1_SIZE != HV_L2_SIZE | ||
149 | # error Rework assumption that L1 and L2 page tables are same size. | ||
150 | #endif | ||
151 | |||
152 | /* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */ | ||
/* Allocate a middle-level table; identical in size to an L2 pte table. */
static inline pmd_t *alloc_pmd(void)
{
	return (pmd_t *)alloc_pte();
}
157 | |||
/* Install a pmd table into a pud entry, reusing the pte-level helper. */
static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
{
	assign_pte((pmd_t *)pud, (pte_t *)pmd);
}
162 | |||
163 | #endif /* __tilegx__ */ | ||
164 | |||
/*
 * Replace the given pmd (a huge-page mapping) with a pointer to the
 * preallocated L2 PTE table covering the same range.  The preallocated
 * table was filled with matching small-page PTEs when the huge mapping
 * was created (see kernel_physical_mapping_init), so only the mapping
 * granularity changes.
 */
void __init shatter_pmd(pmd_t *pmd)
{
	pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd));
	assign_pte(pmd, pte);
}
171 | |||
172 | #ifdef CONFIG_HIGHMEM | ||
173 | /* | ||
174 | * This function initializes a certain range of kernel virtual memory | ||
175 | * with new bootmem page tables, everywhere page tables are missing in | ||
176 | * the given range. | ||
177 | */ | ||
178 | |||
179 | /* | ||
180 | * NOTE: The pagetables are allocated contiguous on the physical space | ||
181 | * so we can cache the place of the first one and move around without | ||
182 | * checking the pgd every time. | ||
183 | */ | ||
static void __init page_table_range_init(unsigned long start,
			unsigned long end, pgd_t *pgd_base)
{
	pgd_t *pgd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	/* Step one pmd (one L2 table's reach) at a time through the range. */
	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr);
		/* Only allocate a table where one is missing. */
		if (pmd_none(*pmd))
			assign_pte(pmd, alloc_pte());
		vaddr += PMD_SIZE;
	}
}
202 | #endif /* CONFIG_HIGHMEM */ | ||
203 | |||
204 | |||
205 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
206 | |||
207 | static int __initdata ktext_hash = 1; /* .text pages */ | ||
208 | static int __initdata kdata_hash = 1; /* .data and .bss pages */ | ||
209 | int __write_once hash_default = 1; /* kernel allocator pages */ | ||
210 | EXPORT_SYMBOL(hash_default); | ||
211 | int __write_once kstack_hash = 1; /* if no homecaching, use h4h */ | ||
212 | #endif /* CHIP_HAS_CBOX_HOME_MAP */ | ||
213 | |||
214 | /* | ||
 * CPUs to use for striping the pages of kernel data. If hash-for-home
216 | * is available, this is only relevant if kcache_hash sets up the | ||
217 | * .data and .bss to be page-homed, and we don't want the default mode | ||
218 | * of using the full set of kernel cpus for the striping. | ||
219 | */ | ||
220 | static __initdata struct cpumask kdata_mask; | ||
221 | static __initdata int kdata_arg_seen; | ||
222 | |||
223 | int __write_once kdata_huge; /* if no homecaching, small pages */ | ||
224 | |||
225 | |||
226 | /* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */ | ||
static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
{
	prot = pte_set_home(prot, home);
#if CHIP_HAS_CBOX_HOME_MAP()
	if (home == PAGE_HOME_IMMUTABLE) {
		/*
		 * Immutable pages follow the kernel-text caching policy:
		 * hashed across the chip if ktext is hashed, else no L3.
		 */
		if (ktext_hash)
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
		else
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
	}
#endif
	return prot;
}
240 | |||
241 | /* | ||
242 | * For a given kernel data VA, how should it be cached? | ||
243 | * We return the complete pgprot_t with caching bits set. | ||
244 | */ | ||
static pgprot_t __init init_pgprot(ulong address)
{
	int cpu;
	unsigned long page;
	/* Offset between the text's real VA and its PAGE_OFFSET alias. */
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

#if CHIP_HAS_CBOX_HOME_MAP()
	/* For kdata=huge, everything is just hash-for-home. */
	if (kdata_huge)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/* We map the aliased pages of permanent text inaccessible. */
	if (address < (ulong) _sinittext - CODE_DELTA)
		return PAGE_NONE;

	/*
	 * We map read-only data non-coherent for performance.  We could
	 * use neighborhood caching on TILE64, but it's not clear it's a win.
	 */
	if ((address >= (ulong) __start_rodata &&
	     address < (ulong) __end_rodata) ||
	    address == (ulong) empty_zero_page) {
		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
	}

	/* As a performance optimization, keep the boot init stack here. */
	if (address >= (ulong)&init_thread_union &&
	    address < (ulong)&init_thread_union + THREAD_SIZE)
		return construct_pgprot(PAGE_KERNEL, smp_processor_id());

#ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
	/* Force the atomic_locks[] array page to be hash-for-home. */
	if (address == (ulong) atomic_locks)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif
#endif

	/*
	 * Everything else that isn't data or bss is heap, so mark it
	 * with the initial heap home (hash-for-home, or this cpu).  This
	 * includes any addresses after the loaded image and any address before
	 * _einitdata, since we already captured the case of text before
	 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
	 *
	 * All the LOWMEM pages that we mark this way will get their
	 * struct page homecache properly marked later, in set_page_homes().
	 * The HIGHMEM pages we leave with a default zero for their
	 * homes, but with a zero free_time we don't have to actually
	 * do a flush action the first time we use them, either.
	 */
	if (address >= (ulong) _end || address < (ulong) _einitdata)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Use hash-for-home if requested for data/bss. */
	if (kdata_hash)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/*
	 * Make the w1data homed like heap to start with, to avoid
	 * making it part of the page-striped data area when we're just
	 * going to convert it to read-only soon anyway.
	 */
	if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

	/*
	 * Otherwise we just hand out consecutive cpus.  To avoid
	 * requiring this function to hold state, we just walk forward from
	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
	 * the requested address, while walking cpu home around kdata_mask.
	 * This is typically no more than a dozen or so iterations.
	 */
	page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
	BUG_ON(address < page || address >= (ulong)_end);
	cpu = cpumask_first(&kdata_mask);
	for (; page < address; page += PAGE_SIZE) {
		/*
		 * Skip pages that were given a fixed home by the special
		 * cases above, so the striping walk stays in sync.
		 */
		if (page >= (ulong)&init_thread_union &&
		    page < (ulong)&init_thread_union + THREAD_SIZE)
			continue;
		if (page == (ulong)empty_zero_page)
			continue;
#ifndef __tilegx__
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
		if (page == (ulong)atomic_locks)
			continue;
#endif
#endif
		cpu = cpumask_next(cpu, &kdata_mask);
		if (cpu == NR_CPUS)
			cpu = cpumask_first(&kdata_mask);
	}
	return construct_pgprot(PAGE_KERNEL, cpu);
}
342 | |||
343 | /* | ||
344 | * This function sets up how we cache the kernel text. If we have | ||
345 | * hash-for-home support, normally that is used instead (see the | ||
346 | * kcache_hash boot flag for more information). But if we end up | ||
347 | * using a page-based caching technique, this option sets up the | ||
348 | * details of that. In addition, the "ktext=nocache" option may | ||
349 | * always be used to disable local caching of text pages, if desired. | ||
350 | */ | ||
351 | |||
352 | static int __initdata ktext_arg_seen; | ||
353 | static int __initdata ktext_small; | ||
354 | static int __initdata ktext_local; | ||
355 | static int __initdata ktext_all; | ||
356 | static int __initdata ktext_nondataplane; | ||
357 | static int __initdata ktext_nocache; | ||
358 | static struct cpumask __initdata ktext_mask; | ||
359 | |||
/*
 * Parse the "ktext=" boot argument.  Accepted forms: an optional
 * leading "nocache" (optionally followed by a comma and one of the
 * values below), then one of "huge", "local", "all", or a cpulist of
 * tiles on which to neighborhood-cache the kernel text.
 */
static int __init setup_ktext(char *str)
{
	if (str == NULL)
		return -EINVAL;

	/* If you have a leading "nocache", turn off ktext caching */
	if (strncmp(str, "nocache", 7) == 0) {
		ktext_nocache = 1;
		pr_info("ktext: disabling local caching of kernel text\n");
		str += 7;
		if (*str == ',')
			++str;
		if (*str == '\0')
			return 0;
	}

	ktext_arg_seen = 1;

	/* Default setting on Tile64: use a huge page */
	if (strcmp(str, "huge") == 0)
		pr_info("ktext: using one huge locally cached page\n");

	/* Pay TLB cost but get no cache benefit: cache small pages locally */
	else if (strcmp(str, "local") == 0) {
		ktext_small = 1;
		ktext_local = 1;
		pr_info("ktext: using small pages with local caching\n");
	}

	/* Neighborhood cache ktext pages on all cpus. */
	else if (strcmp(str, "all") == 0) {
		ktext_small = 1;
		ktext_all = 1;
		pr_info("ktext: using maximal caching neighborhood\n");
	}


	/* Neighborhood ktext pages on specified mask */
	else if (cpulist_parse(str, &ktext_mask) == 0) {
		char buf[NR_CPUS * 5];
		cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
		/* A multi-cpu mask requires small pages to stripe. */
		if (cpumask_weight(&ktext_mask) > 1) {
			ktext_small = 1;
			pr_info("ktext: using caching neighborhood %s "
			       "with small pages\n", buf);
		} else {
			pr_info("ktext: caching on cpu %s with one huge page\n",
				buf);
		}
	}

	else if (*str)
		return -EINVAL;

	return 0;
}
416 | |||
417 | early_param("ktext", setup_ktext); | ||
418 | |||
419 | |||
/*
 * Apply the "ktext=nocache" policy to a text pgprot.  By default the
 * NC bit is set; with nocache the no-alloc-L2 bit is used instead, on
 * chips that have it.  NOTE(review): exact semantics of the NC and
 * no-alloc-L2 bits come from the hypervisor ABI -- confirm there.
 */
static inline pgprot_t ktext_set_nocache(pgprot_t prot)
{
	if (!ktext_nocache)
		prot = hv_pte_set_nc(prot);
#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	else
		prot = hv_pte_set_no_alloc_l2(prot);
#endif
	return prot;
}
430 | |||
#ifndef __tilegx__
/* 32-bit: no middle-level table ever needs allocating; just index. */
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
}
#else
/* 64-bit: allocate the middle-level table on demand. */
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
	if (pud_none(*pud))
		assign_pmd(pud, alloc_pmd());
	return pmd_offset(pud, va);
}
#endif
445 | |||
446 | /* Temporary page table we use for staging. */ | ||
447 | static pgd_t pgtables[PTRS_PER_PGD] | ||
448 | __attribute__((section(".init.page"))); | ||
449 | |||
450 | /* | ||
451 | * This maps the physical memory to kernel virtual address space, a total | ||
452 | * of max_low_pfn pages, by creating page tables starting from address | ||
453 | * PAGE_OFFSET. | ||
454 | * | ||
455 | * This routine transitions us from using a set of compiled-in large | ||
456 | * pages to using some more precise caching, including removing access | ||
457 | * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START) | ||
458 | * marking read-only data as locally cacheable, striping the remaining | ||
459 | * .data and .bss across all the available tiles, and removing access | ||
460 | * to pages above the top of RAM (thus ensuring a page fault from a bad | ||
461 | * virtual address rather than a hypervisor shoot down for accessing | ||
462 | * memory outside the assigned limits). | ||
463 | */ | ||
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

	/* Resolve conflicts between the boot arguments before mapping. */
#if CHIP_HAS_CBOX_HOME_MAP()
	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			   " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}
#endif

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				/*
				 * Map as a huge page, but still fill the
				 * preallocated L2 table so shatter_pmd()
				 * can later downgrade the mapping in place.
				 */
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	/* Now map the kernel text at its real (non-aliased) address. */
	address = MEM_SV_INTRPT;
	pmd = get_pmd(pgtables, address);
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_stext);
		pfn = 0; /* code starts at PA 0 */
		pte = alloc_pte();
		for (pte_ofs = 0; address < (unsigned long)_einittext;
		     pfn++, pte_ofs++, address += PAGE_SIZE) {
			if (!ktext_local) {
				/* Stripe text homes round-robin over the mask. */
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		assign_pte(pmd, pte);
	} else {
		/* Map all of the text with a single huge-page PTE. */
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
		if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
					HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
					HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
					HV_PTE_MODE_CACHE_NO_L3);
		*(pte_t *)pmd = pteval;
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);
}
657 | |||
658 | /* | ||
659 | * devmem_is_allowed() checks to see if /dev/mem access to a certain address | ||
660 | * is valid. The argument is a physical page number. | ||
661 | * | ||
662 | * On Tile, the only valid things for which we can just hand out unchecked | ||
663 | * PTEs are the kernel code and data. Anything else might change its | ||
664 | * homing with time, and we wouldn't know to adjust the /dev/mem PTEs. | ||
665 | * Note that init_thread_union is released to heap soon after boot, | ||
666 | * so we include it in the init data. | ||
667 | * | ||
668 | * For TILE-Gx, we might want to consider allowing access to PA | ||
669 | * regions corresponding to PCI space, etc. | ||
670 | */ | ||
int devmem_is_allowed(unsigned long pagenr)
{
	/*
	 * The clauses are De Morgan'd range tests.  The second reduces to
	 * "pfn in [_einitdata, init_thread_union)" and the third excludes
	 * the [_sinittext, _einittext) pfns.  NOTE(review): these symbols
	 * live in different sections with non-obvious PA ordering (text
	 * is linked high but starts at PA 0) -- verify the ranges against
	 * the image layout rather than trusting the VA ordering.
	 */
	return pagenr < kaddr_to_pfn(_end) &&
		!(pagenr >= kaddr_to_pfn(&init_thread_union) ||
		  pagenr < kaddr_to_pfn(_einitdata)) &&
		!(pagenr >= kaddr_to_pfn(_sinittext) ||
		  pagenr <= kaddr_to_pfn(_einittext-1));
}
679 | |||
680 | #ifdef CONFIG_HIGHMEM | ||
681 | static void __init permanent_kmaps_init(pgd_t *pgd_base) | ||
682 | { | ||
683 | pgd_t *pgd; | ||
684 | pud_t *pud; | ||
685 | pmd_t *pmd; | ||
686 | pte_t *pte; | ||
687 | unsigned long vaddr; | ||
688 | |||
689 | vaddr = PKMAP_BASE; | ||
690 | page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); | ||
691 | |||
692 | pgd = swapper_pg_dir + pgd_index(vaddr); | ||
693 | pud = pud_offset(pgd, vaddr); | ||
694 | pmd = pmd_offset(pud, vaddr); | ||
695 | pte = pte_offset_kernel(pmd, vaddr); | ||
696 | pkmap_page_table = pte; | ||
697 | } | ||
698 | #endif /* CONFIG_HIGHMEM */ | ||
699 | |||
700 | |||
/*
 * Release a range of pfns (not managed by bootmem) straight to the
 * page allocator, freeing in the largest naturally-aligned power-of-two
 * batches that fit within the range.
 */
static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page = pfn_to_page(start);

	for (pfn = start; pfn < end; ) {
		/* Optimize by freeing pages in large batches */
		int order = __ffs(pfn);
		int count, i;
		struct page *p;

		if (order >= MAX_ORDER)
			order = MAX_ORDER-1;
		count = 1 << order;
		/* Shrink the batch so it does not run past "end". */
		while (pfn + count > end) {
			count >>= 1;
			--order;
		}
		for (p = page, i = 0; i < count; ++i, ++p) {
			__ClearPageReserved(p);
			/*
			 * Hacky direct set to avoid unnecessary
			 * lock take/release for EVERY page here.
			 */
			p->_count.counter = 0;
			p->_mapcount.counter = -1;
		}
		/* Only the head page carries a nonzero refcount for freeing. */
		init_page_count(page);
		__free_pages(page, order);
		totalram_pages += count;

		page += count;
		pfn += count;
	}
}
736 | |||
/*
 * Free to the page allocator all memory NOT handled by bootmem: every
 * zone that does not start at pfn 0, minus the per-cpu (kdata=huge)
 * and PCI carve-outs.  Bootmem-managed memory itself is released via
 * free_all_bootmem() in mem_init().
 */
static void __init set_non_bootmem_pages_init(void)
{
	struct zone *z;
	for_each_zone(z) {
		unsigned long start, end;
		int nid = z->zone_pgdat->node_id;
		int idx = zone_idx(z);

		start = z->zone_start_pfn;
		if (start == 0)
			continue; /* bootmem */
		end = start + z->spanned_pages;
		if (idx == ZONE_NORMAL) {
			BUG_ON(start != node_start_pfn[nid]);
			/* Skip the part of the node bootmem already uses. */
			start = node_free_pfn[nid];
		}
#ifdef CONFIG_HIGHMEM
		if (idx == ZONE_HIGHMEM)
			totalhigh_pages += z->spanned_pages;
#endif
		if (kdata_huge) {
			/* Don't free the per-cpu carve-out or beyond it. */
			unsigned long percpu_pfn = node_percpu_pfn[nid];
			if (start < percpu_pfn && end > percpu_pfn)
				end = percpu_pfn;
		}
#ifdef CONFIG_PCI
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			/* Free around the PCI reserve window, not through it. */
			if (end > pci_reserve_end_pfn)
				init_free_pfn_range(pci_reserve_end_pfn, end);
			end = pci_reserve_start_pfn;
		}
#endif
		init_free_pfn_range(start, end);
	}
}
773 | |||
774 | /* | ||
775 | * paging_init() sets up the page tables - note that all of lowmem is | ||
776 | * already mapped by head.S. | ||
777 | */ | ||
void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long vaddr, end;
#endif
#ifdef __tilegx__
	pud_t *pud;
#endif
	pgd_t *pgd_base = swapper_pg_dir;

	/* Build the real kernel mappings, replacing the head.S ones. */
	kernel_physical_mapping_init(pgd_base);

#ifdef CONFIG_HIGHMEM
	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	permanent_kmaps_init(pgd_base);
#endif

#ifdef __tilegx__
	/*
	 * Since GX allocates just one pmd_t array worth of vmalloc space,
	 * we go ahead and allocate it statically here, then share it
	 * globally.  As a result we don't have to worry about any task
	 * changing init_mm once we get up and running, and there's no
	 * need for e.g. vmalloc_sync_all().
	 */
	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
	assign_pmd(pud, alloc_pmd());
#endif
}
814 | |||
815 | |||
816 | /* | ||
817 | * Walk the kernel page tables and derive the page_home() from | ||
818 | * the PTEs, so that set_pte() can properly validate the caching | ||
819 | * of all PTEs it sees. | ||
820 | */ | ||
void __init set_page_homes(void)
{
	/* Intentionally empty for now. */
}
824 | |||
/* With flat memory, record the highest low-memory pfn for mem_map. */
static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_FLATMEM
	max_mapnr = max_low_pfn;
#endif
}
831 | |||
/*
 * Release bootmem to the buddy allocator, report the memory statistics,
 * and dump the most interesting pieces of the kernel virtual layout
 * at KERN_DEBUG level.
 */
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int i;
#ifndef __tilegx__
	void *last;
#endif

#ifdef CONFIG_FLATMEM
	/* FLATMEM requires a global mem_map; it must exist by now. */
	if (!mem_map)
		BUG();
#endif

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
		pr_err("fixmap and kmap areas overlap"
		       " - this will crash\n");
		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
		       FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

	/* this will put all bootmem onto the freelists */
	totalram_pages += free_all_bootmem();

	/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
	set_non_bootmem_pages_init();

	/* Section sizes come from the linker-script boundary symbols. */
	codesize = (unsigned long)&_etext - (unsigned long)&_text;
	datasize = (unsigned long)&_end - (unsigned long)&_sdata;
	initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext;
	initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;

	pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	/*
	 * In debug mode, dump some interesting memory mappings.
	 */
#ifdef CONFIG_HIGHMEM
	printk(KERN_DEBUG "  KMAP    %#lx - %#lx\n",
	       FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
	printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",
	       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
#ifdef CONFIG_HUGEVMAP
	printk(KERN_DEBUG "  HUGEMAP %#lx - %#lx\n",
	       HUGE_VMAP_BASE, HUGE_VMAP_END - 1);
#endif
	printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",
	       _VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
	/* On tilegx, print the kernel address range of each NUMA node. */
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		struct pglist_data *node = &node_data[i];
		if (node->node_present_pages) {
			unsigned long start = (unsigned long)
				pfn_to_kaddr(node->node_start_pfn);
			unsigned long end = start +
				(node->node_present_pages << PAGE_SHIFT);
			printk(KERN_DEBUG "  MEM%d    %#lx - %#lx\n",
			       i, start, end - 1);
		}
	}
#else
	/*
	 * On 32-bit tile, walk vbase_map backwards; entries of -1 mark
	 * nodes with no lowmem mapping.
	 */
	last = high_memory;
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		if ((unsigned long)vbase_map[i] != -1UL) {
			printk(KERN_DEBUG "  LOWMEM%d %#lx - %#lx\n",
			       i, (unsigned long) (vbase_map[i]),
			       (unsigned long) (last-1));
			last = vbase_map[i];
		}
	}
#endif

#ifndef __tilegx__
	/*
	 * Convert from using one lock for all atomic operations to
	 * one per cpu.
	 */
	__init_atomic_per_cpu();
#endif
}
926 | |||
927 | /* | ||
928 | * this is for the non-NUMA, single node SMP system case. | ||
929 | * Specifically, in the case of x86, we will always add | ||
930 | * memory to the highmem for now. | ||
931 | */ | ||
932 | #ifndef CONFIG_NEED_MULTIPLE_NODES | ||
933 | int arch_add_memory(u64 start, u64 size) | ||
934 | { | ||
935 | struct pglist_data *pgdata = &contig_page_data; | ||
936 | struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1; | ||
937 | unsigned long start_pfn = start >> PAGE_SHIFT; | ||
938 | unsigned long nr_pages = size >> PAGE_SHIFT; | ||
939 | |||
940 | return __add_pages(zone, start_pfn, nr_pages); | ||
941 | } | ||
942 | |||
943 | int remove_memory(u64 start, u64 size) | ||
944 | { | ||
945 | return -EINVAL; | ||
946 | } | ||
947 | #endif | ||
948 | |||
/* Slab cache for full-size, naturally-aligned page directories. */
struct kmem_cache *pgd_cache;

/*
 * Create the pgd cache.  The alignment argument equals the object size
 * so that each pgd is naturally aligned, as required for a root
 * page table.
 */
void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd",
				PTRS_PER_PGD*sizeof(pgd_t),
				PTRS_PER_PGD*sizeof(pgd_t),
				0,
				NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}
961 | |||
#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
/*
 * The __w1data area holds data that is only written during initialization,
 * and is read-only and thus freely cacheable thereafter.  Fix the page
 * table entries that cover that region accordingly.
 */
static void mark_w1data_ro(void)
{
	/* Loop over page table entries */
	unsigned long addr = (unsigned long)__w1data_begin;
	/* The section is linked page-aligned; anything else is a bug. */
	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
		unsigned long pfn = kaddr_to_pfn((void *)addr);
		/* NULL mm means walk swapper_pg_dir (kernel mapping). */
		pte_t *ptep = virt_to_pte(NULL, addr);
		BUG_ON(pte_huge(*ptep));   /* not relevant for kdata_huge */
		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
	}
}
#endif
981 | |||
982 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
983 | static long __write_once initfree; | ||
984 | #else | ||
985 | static long __write_once initfree = 1; | ||
986 | #endif | ||
987 | |||
988 | /* Select whether to free (1) or mark unusable (0) the __init pages. */ | ||
989 | static int __init set_initfree(char *str) | ||
990 | { | ||
991 | strict_strtol(str, 0, &initfree); | ||
992 | pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't"); | ||
993 | return 1; | ||
994 | } | ||
995 | __setup("initfree=", set_initfree); | ||
996 | |||
/*
 * Either free the pages in [begin, end) back to the allocator
 * (initfree != 0) or unmap them so stray accesses fault (initfree == 0).
 * "what" is used only for the log message.
 */
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = (unsigned long) begin;

	/*
	 * kdata=huge maps this data with huge pages, which cannot be
	 * unmapped one small page at a time, so force freeing instead.
	 */
	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0:"
			   " incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_pte(NULL, addr);
		if (!initfree) {
			/*
			 * If debugging page accesses then do not free
			 * this memory but mark them not present - any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
		__ClearPageReserved(page);
		init_page_count(page);
		/* Huge mappings here are only expected with kdata=huge. */
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		/* Poison the page so stale uses are easy to spot. */
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
1041 | |||
/*
 * Free (or unmap) the init text and data once boot is complete.
 * The init text was executed from its high MEM_SV_INTRPT alias, so
 * the addresses to free must be translated back to the linear map.
 */
void free_initmem(void)
{
	/* Offset between the kernel text alias and the linear mapping. */
	const unsigned long text_delta = MEM_SV_INTRPT - PAGE_OFFSET;

	/*
	 * Evict the dirty initdata on the boot cpu, evict the w1data
	 * wherever it's homed, and evict all the init code everywhere.
	 * We are guaranteed that no one will touch the init pages any
	 * more, and although other cpus may be touching the w1data,
	 * we only actually change the caching on tile64, which won't
	 * be keeping local copies in the other tiles' caches anyway.
	 */
	homecache_evict(&cpu_cacheable_map);

	/* Free the data pages that we won't use again after init. */
	free_init_pages("unused kernel data",
			(unsigned long)_sinitdata,
			(unsigned long)_einitdata);

	/*
	 * Free the pages mapped from 0xc0000000 that correspond to code
	 * pages from 0xfd000000 that we won't use again after init.
	 */
	free_init_pages("unused kernel text",
			(unsigned long)_sinittext - text_delta,
			(unsigned long)_einittext - text_delta);

#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
	/*
	 * Upgrade the .w1data section to globally cached.
	 * We don't do this on tilepro, since the cache architecture
	 * pretty much makes it irrelevant, and in any case we end
	 * up having racing issues with other tiles that may touch
	 * the data after we flush the cache but before we update
	 * the PTEs and flush the TLBs, causing sharer shootdowns
	 * later.  Even though this is to clean data, it seems like
	 * an unnecessary complication.
	 */
	mark_w1data_ro();
#endif

	/* Do a global TLB flush so everyone sees the changes. */
	flush_tlb_all();
}
diff --git a/arch/tile/mm/migrate.h b/arch/tile/mm/migrate.h new file mode 100644 index 000000000000..cd45a0837fa6 --- /dev/null +++ b/arch/tile/mm/migrate.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Structure definitions for migration, exposed here for use by | ||
15 | * arch/tile/kernel/asm-offsets.c. | ||
16 | */ | ||
17 | |||
18 | #ifndef MM_MIGRATE_H | ||
19 | #define MM_MIGRATE_H | ||
20 | |||
21 | #include <linux/cpumask.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | |||
24 | /* | ||
25 | * This function is used as a helper when setting up the initial | ||
26 | * page table (swapper_pg_dir). | ||
27 | */ | ||
28 | extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access, | ||
29 | HV_ASID asid, | ||
30 | const unsigned long *cpumask); | ||
31 | |||
32 | /* | ||
33 | * This function supports migration as a "helper" as follows: | ||
34 | * | ||
35 | * - Set the stack PTE itself to "migrating". | ||
36 | * - Do a global TLB flush for (va,length) and the specified ASIDs. | ||
37 | * - Do a cache-evict on all necessary cpus. | ||
38 | * - Write the new stack PTE. | ||
39 | * | ||
40 | * Note that any non-NULL pointers must not point to the page that | ||
41 | * is handled by the stack_pte itself. | ||
42 | */ | ||
43 | extern int homecache_migrate_stack_and_flush(pte_t stack_pte, unsigned long va, | ||
44 | size_t length, pte_t *stack_ptep, | ||
45 | const struct cpumask *cache_cpumask, | ||
46 | const struct cpumask *tlb_cpumask, | ||
47 | HV_Remote_ASID *asids, | ||
48 | int asidcount); | ||
49 | |||
50 | #endif /* MM_MIGRATE_H */ | ||
diff --git a/arch/tile/mm/migrate_32.S b/arch/tile/mm/migrate_32.S new file mode 100644 index 000000000000..f738765cd1e6 --- /dev/null +++ b/arch/tile/mm/migrate_32.S | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This routine is a helper for migrating the home of a set of pages to | ||
15 | * a new cpu. See the documentation in homecache.c for more information. | ||
16 | */ | ||
17 | |||
18 | #include <linux/linkage.h> | ||
19 | #include <linux/threads.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/types.h> | ||
22 | #include <asm/asm-offsets.h> | ||
23 | #include <hv/hypervisor.h> | ||
24 | |||
	.text

/*
 * First, some definitions that apply to all the code in the file.
 */

/* Locals (caller-save) */
#define r_tmp r10
#define r_save_sp r11

/* What we save where in the stack frame; must include all callee-saves. */
#define FRAME_SP 4
#define FRAME_R30 8
#define FRAME_R31 12
#define FRAME_R32 16
#define FRAME_R33 20
#define FRAME_R34 24
#define FRAME_R35 28
#define FRAME_SIZE 32




/*
 * On entry:
 *
 *   r0 low word of the new context PA to install (moved to r_context_lo)
 *   r1 high word of the new context PA to install (moved to r_context_hi)
 *   r2 low word of PTE to use for context access (moved to r_access_lo)
 *   r3 high word of PTE to use for context access (moved to r_access_hi)
 *   r4 ASID to use for new context (moved to r_asid)
 *   r5 pointer to cpumask with just this cpu set in it (r_my_cpumask)
 */

/* Arguments (caller-save) */
#define r_context_lo_in r0
#define r_context_hi_in r1
#define r_access_lo_in r2
#define r_access_hi_in r3
#define r_asid_in r4
#define r_my_cpumask r5

/* Locals (callee-save); must not be more than FRAME_xxx above. */
#define r_save_ics r30
#define r_context_lo r31
#define r_context_hi r32
#define r_access_lo r33
#define r_access_hi r34
#define r_asid r35
74 | |||
STD_ENTRY(flush_and_install_context)
	/*
	 * Create a stack frame; we can't touch it once we flush the
	 * cache until we install the new page table and flush the TLB.
	 */
	{
	 move r_save_sp, sp
	 sw sp, lr
	 addi sp, sp, -FRAME_SIZE
	}
	/* Spill the old sp and every callee-save register we will use. */
	addi r_tmp, sp, FRAME_SP
	{
	 sw r_tmp, r_save_sp
	 addi r_tmp, sp, FRAME_R30
	}
	{
	 sw r_tmp, r30
	 addi r_tmp, sp, FRAME_R31
	}
	{
	 sw r_tmp, r31
	 addi r_tmp, sp, FRAME_R32
	}
	{
	 sw r_tmp, r32
	 addi r_tmp, sp, FRAME_R33
	}
	{
	 sw r_tmp, r33
	 addi r_tmp, sp, FRAME_R34
	}
	{
	 sw r_tmp, r34
	 addi r_tmp, sp, FRAME_R35
	}
	sw r_tmp, r35

	/* Move some arguments to callee-save registers. */
	{
	 move r_context_lo, r_context_lo_in
	 move r_context_hi, r_context_hi_in
	}
	{
	 move r_access_lo, r_access_lo_in
	 move r_access_hi, r_access_hi_in
	}
	move r_asid, r_asid_in

	/* Disable interrupts, since we can't use our stack. */
	{
	 mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
	 movei r_tmp, 1
	}
	mtspr INTERRUPT_CRITICAL_SECTION, r_tmp

	/* First, flush our L2 cache. */
	{
	 move r0, zero  /* cache_pa */
	 move r1, zero
	}
	{
	 auli r2, zero, ha16(HV_FLUSH_EVICT_L2)  /* cache_control */
	 move r3, r_my_cpumask  /* cache_cpumask */
	}
	{
	 move r4, zero  /* tlb_va */
	 move r5, zero  /* tlb_length */
	}
	{
	 move r6, zero  /* tlb_pgsize */
	 move r7, zero  /* tlb_cpumask */
	}
	{
	 move r8, zero  /* asids */
	 move r9, zero  /* asidcount */
	}
	jal hv_flush_remote
	/* Nonzero r0 is a hypervisor error code; bail out with it. */
	bnz r0, .Ldone

	/* Now install the new page table. */
	{
	 move r0, r_context_lo
	 move r1, r_context_hi
	}
	{
	 move r2, r_access_lo
	 move r3, r_access_hi
	}
	{
	 move r4, r_asid
	 movei r5, HV_CTX_DIRECTIO
	}
	jal hv_install_context
	bnz r0, .Ldone

	/* Finally, flush the TLB. */
	{
	 movei r0, 0   /* preserve_global */
	 jal hv_flush_all
	}

.Ldone:
	/* Reset interrupts back how they were before. */
	mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics

	/* Restore the callee-saved registers and return. */
	addli lr, sp, FRAME_SIZE
	{
	 lw lr, lr
	 addli r_tmp, sp, FRAME_R30
	}
	{
	 lw r30, r_tmp
	 addli r_tmp, sp, FRAME_R31
	}
	{
	 lw r31, r_tmp
	 addli r_tmp, sp, FRAME_R32
	}
	{
	 lw r32, r_tmp
	 addli r_tmp, sp, FRAME_R33
	}
	{
	 lw r33, r_tmp
	 addli r_tmp, sp, FRAME_R34
	}
	{
	 lw r34, r_tmp
	 addli r_tmp, sp, FRAME_R35
	}
	{
	 lw r35, r_tmp
	 addi sp, sp, FRAME_SIZE
	}
	jrp lr
	STD_ENDPROC(flush_and_install_context)
diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c new file mode 100644 index 000000000000..f96f4cec602a --- /dev/null +++ b/arch/tile/mm/mmap.c | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Taken from the i386 architecture and simplified. | ||
15 | */ | ||
16 | |||
17 | #include <linux/mm.h> | ||
18 | #include <linux/random.h> | ||
19 | #include <linux/limits.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/mman.h> | ||
22 | #include <linux/compat.h> | ||
23 | |||
24 | /* | ||
25 | * Top of mmap area (just below the process stack). | ||
26 | * | ||
27 | * Leave an at least ~128 MB hole. | ||
28 | */ | ||
29 | #define MIN_GAP (128*1024*1024) | ||
30 | #define MAX_GAP (TASK_SIZE/6*5) | ||
31 | |||
32 | static inline unsigned long mmap_base(struct mm_struct *mm) | ||
33 | { | ||
34 | unsigned long gap = rlimit(RLIMIT_STACK); | ||
35 | unsigned long random_factor = 0; | ||
36 | |||
37 | if (current->flags & PF_RANDOMIZE) | ||
38 | random_factor = get_random_int() % (1024*1024); | ||
39 | |||
40 | if (gap < MIN_GAP) | ||
41 | gap = MIN_GAP; | ||
42 | else if (gap > MAX_GAP) | ||
43 | gap = MAX_GAP; | ||
44 | |||
45 | return PAGE_ALIGN(TASK_SIZE - gap - random_factor); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * This function, called very early during the creation of a new | ||
50 | * process VM image, sets up which VM layout function to use: | ||
51 | */ | ||
52 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
53 | { | ||
54 | #if !defined(__tilegx__) | ||
55 | int is_32bit = 1; | ||
56 | #elif defined(CONFIG_COMPAT) | ||
57 | int is_32bit = is_compat_task(); | ||
58 | #else | ||
59 | int is_32bit = 0; | ||
60 | #endif | ||
61 | |||
62 | /* | ||
63 | * Use standard layout if the expected stack growth is unlimited | ||
64 | * or we are running native 64 bits. | ||
65 | */ | ||
66 | if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) { | ||
67 | mm->mmap_base = TASK_UNMAPPED_BASE; | ||
68 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
69 | mm->unmap_area = arch_unmap_area; | ||
70 | } else { | ||
71 | mm->mmap_base = mmap_base(mm); | ||
72 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | ||
73 | mm->unmap_area = arch_unmap_area_topdown; | ||
74 | } | ||
75 | } | ||
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c new file mode 100644 index 000000000000..28c23140c947 --- /dev/null +++ b/arch/tile/mm/pgtable.c | |||
@@ -0,0 +1,530 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/swap.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/highmem.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/pagemap.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/cpumask.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/vmalloc.h> | ||
29 | #include <linux/smp.h> | ||
30 | |||
31 | #include <asm/system.h> | ||
32 | #include <asm/pgtable.h> | ||
33 | #include <asm/pgalloc.h> | ||
34 | #include <asm/fixmap.h> | ||
35 | #include <asm/tlb.h> | ||
36 | #include <asm/tlbflush.h> | ||
37 | #include <asm/homecache.h> | ||
38 | |||
39 | #define K(x) ((x) << (PAGE_SHIFT-10)) | ||
40 | |||
41 | /* | ||
42 | * The normal show_free_areas() is too verbose on Tile, with dozens | ||
43 | * of processors and often four NUMA zones each with high and lowmem. | ||
44 | */ | ||
45 | void show_mem(void) | ||
46 | { | ||
47 | struct zone *zone; | ||
48 | |||
49 | pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu" | ||
50 | " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu" | ||
51 | " pagecache:%lu swap:%lu\n", | ||
52 | (global_page_state(NR_ACTIVE_ANON) + | ||
53 | global_page_state(NR_ACTIVE_FILE)), | ||
54 | (global_page_state(NR_INACTIVE_ANON) + | ||
55 | global_page_state(NR_INACTIVE_FILE)), | ||
56 | global_page_state(NR_FILE_DIRTY), | ||
57 | global_page_state(NR_WRITEBACK), | ||
58 | global_page_state(NR_UNSTABLE_NFS), | ||
59 | global_page_state(NR_FREE_PAGES), | ||
60 | (global_page_state(NR_SLAB_RECLAIMABLE) + | ||
61 | global_page_state(NR_SLAB_UNRECLAIMABLE)), | ||
62 | global_page_state(NR_FILE_MAPPED), | ||
63 | global_page_state(NR_PAGETABLE), | ||
64 | global_page_state(NR_BOUNCE), | ||
65 | global_page_state(NR_FILE_PAGES), | ||
66 | nr_swap_pages); | ||
67 | |||
68 | for_each_zone(zone) { | ||
69 | unsigned long flags, order, total = 0, largest_order = -1; | ||
70 | |||
71 | if (!populated_zone(zone)) | ||
72 | continue; | ||
73 | |||
74 | spin_lock_irqsave(&zone->lock, flags); | ||
75 | for (order = 0; order < MAX_ORDER; order++) { | ||
76 | int nr = zone->free_area[order].nr_free; | ||
77 | total += nr << order; | ||
78 | if (nr) | ||
79 | largest_order = order; | ||
80 | } | ||
81 | spin_unlock_irqrestore(&zone->lock, flags); | ||
82 | pr_err("Node %d %7s: %lukB (largest %luKb)\n", | ||
83 | zone_to_nid(zone), zone->name, | ||
84 | K(total), largest_order ? K(1UL) << largest_order : 0); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Associate a virtual page frame with a given physical page frame | ||
90 | * and protection flags for that frame. | ||
91 | */ | ||
92 | static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) | ||
93 | { | ||
94 | pgd_t *pgd; | ||
95 | pud_t *pud; | ||
96 | pmd_t *pmd; | ||
97 | pte_t *pte; | ||
98 | |||
99 | pgd = swapper_pg_dir + pgd_index(vaddr); | ||
100 | if (pgd_none(*pgd)) { | ||
101 | BUG(); | ||
102 | return; | ||
103 | } | ||
104 | pud = pud_offset(pgd, vaddr); | ||
105 | if (pud_none(*pud)) { | ||
106 | BUG(); | ||
107 | return; | ||
108 | } | ||
109 | pmd = pmd_offset(pud, vaddr); | ||
110 | if (pmd_none(*pmd)) { | ||
111 | BUG(); | ||
112 | return; | ||
113 | } | ||
114 | pte = pte_offset_kernel(pmd, vaddr); | ||
115 | /* <pfn,flags> stored as-is, to permit clearing entries */ | ||
116 | set_pte(pte, pfn_pte(pfn, flags)); | ||
117 | |||
118 | /* | ||
119 | * It's enough to flush this one mapping. | ||
120 | * This appears conservative since it is only called | ||
121 | * from __set_fixmap. | ||
122 | */ | ||
123 | local_flush_tlb_page(NULL, vaddr, PAGE_SIZE); | ||
124 | } | ||
125 | |||
126 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags) | ||
127 | { | ||
128 | unsigned long address = __fix_to_virt(idx); | ||
129 | |||
130 | if (idx >= __end_of_fixed_addresses) { | ||
131 | BUG(); | ||
132 | return; | ||
133 | } | ||
134 | set_pte_pfn(address, phys >> PAGE_SHIFT, flags); | ||
135 | } | ||
136 | |||
#if defined(CONFIG_HIGHPTE)
/*
 * Map the (possibly highmem) page holding the pte array for "address"
 * and return a pointer to the pte within it.
 */
pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type)
{
	/*
	 * Add the sub-page offset of the pte array to the kmap base.
	 * The inner parentheses are required: "+" binds tighter than
	 * "&", so without them the mask was applied to the whole sum,
	 * discarding the kmap base address itself.
	 */
	pte_t *pte = kmap_atomic(pmd_page(*dir), type) +
		((pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK);
	return &pte[pte_index(address)];
}
#endif
145 | |||
146 | /* | ||
147 | * List of all pgd's needed so it can invalidate entries in both cached | ||
148 | * and uncached pgd's. This is essentially codepath-based locking | ||
149 | * against pageattr.c; it is the unique case in which a valid change | ||
150 | * of kernel pagetables can't be lazily synchronized by vmalloc faults. | ||
151 | * vmalloc faults work because attached pagetables are never freed. | ||
152 | * The locking scheme was chosen on the basis of manfred's | ||
153 | * recommendations and having no core impact whatsoever. | ||
154 | * -- wli | ||
155 | */ | ||
156 | DEFINE_SPINLOCK(pgd_lock); | ||
157 | LIST_HEAD(pgd_list); | ||
158 | |||
159 | static inline void pgd_list_add(pgd_t *pgd) | ||
160 | { | ||
161 | list_add(pgd_to_list(pgd), &pgd_list); | ||
162 | } | ||
163 | |||
164 | static inline void pgd_list_del(pgd_t *pgd) | ||
165 | { | ||
166 | list_del(pgd_to_list(pgd)); | ||
167 | } | ||
168 | |||
169 | #define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET) | ||
170 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START) | ||
171 | |||
/*
 * Initialize a new pgd: zero the user portion, copy the kernel
 * mappings from swapper_pg_dir, and link it onto pgd_list so later
 * kernel-pagetable updates can find it.  Runs under pgd_lock.
 */
static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	clone_pgd_range(pgd + KERNEL_PGD_INDEX_START,
			swapper_pg_dir + KERNEL_PGD_INDEX_START,
			KERNEL_PGD_PTRS);

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
195 | |||
/* Unlink a dying pgd from pgd_list under pgd_lock. */
static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
204 | |||
205 | pgd_t *pgd_alloc(struct mm_struct *mm) | ||
206 | { | ||
207 | pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL); | ||
208 | if (pgd) | ||
209 | pgd_ctor(pgd); | ||
210 | return pgd; | ||
211 | } | ||
212 | |||
/* Unlink the pgd from pgd_list, then return it to the cache. */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}
218 | |||
219 | |||
220 | #define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER) | ||
221 | |||
222 | struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) | ||
223 | { | ||
224 | gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP; | ||
225 | struct page *p; | ||
226 | |||
227 | #ifdef CONFIG_HIGHPTE | ||
228 | flags |= __GFP_HIGHMEM; | ||
229 | #endif | ||
230 | |||
231 | p = alloc_pages(flags, L2_USER_PGTABLE_ORDER); | ||
232 | if (p == NULL) | ||
233 | return NULL; | ||
234 | |||
235 | pgtable_page_ctor(p); | ||
236 | return p; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * Free page immediately (used in __pte_alloc if we raced with another | ||
241 | * process). We have to correct whatever pte_alloc_one() did before | ||
242 | * returning the pages to the allocator. | ||
243 | */ | ||
244 | void pte_free(struct mm_struct *mm, struct page *p) | ||
245 | { | ||
246 | pgtable_page_dtor(p); | ||
247 | __free_pages(p, L2_USER_PGTABLE_ORDER); | ||
248 | } | ||
249 | |||
/*
 * Queue the pages of an L2 user page table for freeing via the
 * mmu_gather, page by page since the table spans multiple pages.
 */
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
		    unsigned long address)
{
	int i;

	pgtable_page_dtor(pte);
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		/* Fast mode: free all the constituent pages immediately. */
		struct page *pte_pages[L2_USER_PGTABLE_PAGES];
		for (i = 0; i < L2_USER_PGTABLE_PAGES; ++i)
			pte_pages[i] = pte + i;
		free_pages_and_swap_cache(pte_pages, L2_USER_PGTABLE_PAGES);
		return;
	}
	/* Otherwise batch them into the gather, flushing when it fills. */
	for (i = 0; i < L2_USER_PGTABLE_PAGES; ++i) {
		tlb->pages[tlb->nr++] = pte + i;
		if (tlb->nr >= FREE_PTE_NR)
			tlb_flush_mmu(tlb, 0, 0);
	}
}
270 | |||
#ifndef __tilegx__

/*
 * FIXME: needs to be atomic vs hypervisor writes.  For now we make the
 * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	/* Touch only the byte holding the accessed bit (see #error above). */
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}
290 | |||
291 | /* | ||
292 | * This implementation is atomic vs hypervisor writes, since the hypervisor | ||
293 | * always writes the low word (where "accessed" and "dirty" are) and this | ||
294 | * routine only writes the high word. | ||
295 | */ | ||
296 | void ptep_set_wrprotect(struct mm_struct *mm, | ||
297 | unsigned long addr, pte_t *ptep) | ||
298 | { | ||
299 | #if HV_PTE_INDEX_WRITABLE < 32 | ||
300 | # error Code assumes HV_PTE "writable" bit in high word | ||
301 | #endif | ||
302 | u32 *tmp = (u32 *)ptep; | ||
303 | tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32)); | ||
304 | } | ||
305 | |||
306 | #endif | ||
307 | |||
308 | pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr) | ||
309 | { | ||
310 | pgd_t *pgd; | ||
311 | pud_t *pud; | ||
312 | pmd_t *pmd; | ||
313 | |||
314 | if (pgd_addr_invalid(addr)) | ||
315 | return NULL; | ||
316 | |||
317 | pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr); | ||
318 | pud = pud_offset(pgd, addr); | ||
319 | if (!pud_present(*pud)) | ||
320 | return NULL; | ||
321 | pmd = pmd_offset(pud, addr); | ||
322 | if (pmd_huge_page(*pmd)) | ||
323 | return (pte_t *)pmd; | ||
324 | if (!pmd_present(*pmd)) | ||
325 | return NULL; | ||
326 | return pte_offset_kernel(pmd, addr); | ||
327 | } | ||
328 | |||
329 | pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu) | ||
330 | { | ||
331 | unsigned int width = smp_width; | ||
332 | int x = cpu % width; | ||
333 | int y = cpu / width; | ||
334 | BUG_ON(y >= smp_height); | ||
335 | BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3); | ||
336 | BUG_ON(cpu < 0 || cpu >= NR_CPUS); | ||
337 | BUG_ON(!cpu_is_valid_lotar(cpu)); | ||
338 | return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y)); | ||
339 | } | ||
340 | |||
341 | int get_remote_cache_cpu(pgprot_t prot) | ||
342 | { | ||
343 | HV_LOTAR lotar = hv_pte_get_lotar(prot); | ||
344 | int x = HV_LOTAR_X(lotar); | ||
345 | int y = HV_LOTAR_Y(lotar); | ||
346 | BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3); | ||
347 | return x + y * smp_width; | ||
348 | } | ||
349 | |||
350 | void set_pte_order(pte_t *ptep, pte_t pte, int order) | ||
351 | { | ||
352 | unsigned long pfn = pte_pfn(pte); | ||
353 | struct page *page = pfn_to_page(pfn); | ||
354 | |||
355 | /* Update the home of a PTE if necessary */ | ||
356 | pte = pte_set_home(pte, page_home(page)); | ||
357 | |||
358 | #ifdef __tilegx__ | ||
359 | *ptep = pte; | ||
360 | #else | ||
361 | /* | ||
362 | * When setting a PTE, write the high bits first, then write | ||
363 | * the low bits. This sets the "present" bit only after the | ||
364 | * other bits are in place. If a particular PTE update | ||
365 | * involves transitioning from one valid PTE to another, it | ||
366 | * may be necessary to call set_pte_order() more than once, | ||
367 | * transitioning via a suitable intermediate state. | ||
368 | * Note that this sequence also means that if we are transitioning | ||
369 | * from any migrating PTE to a non-migrating one, we will not | ||
370 | * see a half-updated PTE with the migrating bit off. | ||
371 | */ | ||
372 | #if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32 | ||
373 | # error Must write the present and migrating bits last | ||
374 | #endif | ||
375 | ((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32); | ||
376 | barrier(); | ||
377 | ((u32 *)ptep)[0] = (u32)(pte_val(pte)); | ||
378 | #endif | ||
379 | } | ||
380 | |||
/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	/* Nonzero (set to -1U by start_mm_caching()) once any priority
	 * mapping has been seen in this mm. */
	return mm->context.priority_cached;
}
386 | |||
387 | /* | ||
388 | * Add a priority mapping to an mm_context and | ||
389 | * notify the hypervisor if this is the first one. | ||
390 | */ | ||
391 | void start_mm_caching(struct mm_struct *mm) | ||
392 | { | ||
393 | if (!mm_is_priority_cached(mm)) { | ||
394 | mm->context.priority_cached = -1U; | ||
395 | hv_set_caching(-1U); | ||
396 | } | ||
397 | } | ||
398 | |||
399 | /* | ||
400 | * Validate and return the priority_cached flag. We know if it's zero | ||
401 | * that we don't need to scan, since we immediately set it non-zero | ||
402 | * when we first consider a MAP_CACHE_PRIORITY mapping. | ||
403 | * | ||
404 | * We only _try_ to acquire the mmap_sem semaphore; if we can't acquire it, | ||
405 | * since we're in an interrupt context (servicing switch_mm) we don't | ||
406 | * worry about it and don't unset the "priority_cached" field. | ||
407 | * Presumably we'll come back later and have more luck and clear | ||
408 | * the value then; for now we'll just keep the cache marked for priority. | ||
409 | */ | ||
410 | static unsigned int update_priority_cached(struct mm_struct *mm) | ||
411 | { | ||
412 | if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) { | ||
413 | struct vm_area_struct *vm; | ||
414 | for (vm = mm->mmap; vm; vm = vm->vm_next) { | ||
415 | if (hv_pte_get_cached_priority(vm->vm_page_prot)) | ||
416 | break; | ||
417 | } | ||
418 | if (vm == NULL) | ||
419 | mm->context.priority_cached = 0; | ||
420 | up_write(&mm->mmap_sem); | ||
421 | } | ||
422 | return mm->context.priority_cached; | ||
423 | } | ||
424 | |||
/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (mm_is_priority_cached(next)) {
		/* Revalidate the flag and program the hypervisor with it. */
		hv_set_caching(update_priority_cached(next));
	} else if (mm_is_priority_cached(prev)) {
		/*
		 * The new mm doesn't use priority caching; only issue the
		 * hv_set_caching() if the old mm did, otherwise caching is
		 * already known to be zero.
		 */
		hv_set_caching(0);
	}
}
439 | |||
440 | #if CHIP_HAS_MMIO() | ||
441 | |||
442 | /* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */ | ||
443 | void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, | ||
444 | pgprot_t home) | ||
445 | { | ||
446 | void *addr; | ||
447 | struct vm_struct *area; | ||
448 | unsigned long offset, last_addr; | ||
449 | pgprot_t pgprot; | ||
450 | |||
451 | /* Don't allow wraparound or zero size */ | ||
452 | last_addr = phys_addr + size - 1; | ||
453 | if (!size || last_addr < phys_addr) | ||
454 | return NULL; | ||
455 | |||
456 | /* Create a read/write, MMIO VA mapping homed at the requested shim. */ | ||
457 | pgprot = PAGE_KERNEL; | ||
458 | pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO); | ||
459 | pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home)); | ||
460 | |||
461 | /* | ||
462 | * Mappings have to be page-aligned | ||
463 | */ | ||
464 | offset = phys_addr & ~PAGE_MASK; | ||
465 | phys_addr &= PAGE_MASK; | ||
466 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | ||
467 | |||
468 | /* | ||
469 | * Ok, go for it.. | ||
470 | */ | ||
471 | area = get_vm_area(size, VM_IOREMAP /* | other flags? */); | ||
472 | if (!area) | ||
473 | return NULL; | ||
474 | area->phys_addr = phys_addr; | ||
475 | addr = area->addr; | ||
476 | if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, | ||
477 | phys_addr, pgprot)) { | ||
478 | remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr)); | ||
479 | return NULL; | ||
480 | } | ||
481 | return (__force void __iomem *) (offset + (char *)addr); | ||
482 | } | ||
483 | EXPORT_SYMBOL(ioremap_prot); | ||
484 | |||
/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/* Deliberately fatal: plain ioremap() of PCI MMIO space is not
	 * implemented here; callers must use ioremap_prot() with an
	 * explicit home.  NOTE(review): confirm no generic code path can
	 * reach this on supported configs. */
	panic("ioremap for PCI MMIO is not supported");
}
EXPORT_SYMBOL(ioremap);
491 | |||
492 | /* Unmap an MMIO VA mapping. */ | ||
493 | void iounmap(volatile void __iomem *addr_in) | ||
494 | { | ||
495 | volatile void __iomem *addr = (volatile void __iomem *) | ||
496 | (PAGE_MASK & (unsigned long __force)addr_in); | ||
497 | #if 1 | ||
498 | vunmap((void * __force)addr); | ||
499 | #else | ||
500 | /* x86 uses this complicated flow instead of vunmap(). Is | ||
501 | * there any particular reason we should do the same? */ | ||
502 | struct vm_struct *p, *o; | ||
503 | |||
504 | /* Use the vm area unlocked, assuming the caller | ||
505 | ensures there isn't another iounmap for the same address | ||
506 | in parallel. Reuse of the virtual address is prevented by | ||
507 | leaving it in the global lists until we're done with it. | ||
508 | cpa takes care of the direct mappings. */ | ||
509 | read_lock(&vmlist_lock); | ||
510 | for (p = vmlist; p; p = p->next) { | ||
511 | if (p->addr == addr) | ||
512 | break; | ||
513 | } | ||
514 | read_unlock(&vmlist_lock); | ||
515 | |||
516 | if (!p) { | ||
517 | pr_err("iounmap: bad address %p\n", addr); | ||
518 | dump_stack(); | ||
519 | return; | ||
520 | } | ||
521 | |||
522 | /* Finally remove it */ | ||
523 | o = remove_vm_area((void *)addr); | ||
524 | BUG_ON(p != o || o == NULL); | ||
525 | kfree(p); | ||
526 | #endif | ||
527 | } | ||
528 | EXPORT_SYMBOL(iounmap); | ||
529 | |||
530 | #endif /* CHIP_HAS_MMIO() */ | ||