Diffstat (limited to 'arch/tile')
199 files changed, 49495 insertions, 0 deletions
diff --git a/arch/tile/Kbuild b/arch/tile/Kbuild
new file mode 100644
index 000000000000..a9b922716092
--- /dev/null
+++ b/arch/tile/Kbuild
@@ -0,0 +1,3 @@
1 | |||
2 | obj-y += kernel/ | ||
3 | obj-y += mm/ | ||
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
new file mode 100644
index 000000000000..290ef4161939
--- /dev/null
+++ b/arch/tile/Kconfig
@@ -0,0 +1,352 @@
1 | # For a description of the syntax of this configuration file, | ||
2 | # see Documentation/kbuild/kconfig-language.txt. | ||
3 | |||
4 | config MMU | ||
5 | def_bool y | ||
6 | |||
7 | config GENERIC_CSUM | ||
8 | def_bool y | ||
9 | |||
10 | config GENERIC_HARDIRQS | ||
11 | def_bool y | ||
12 | |||
13 | config GENERIC_HARDIRQS_NO__DO_IRQ | ||
14 | def_bool y | ||
15 | |||
16 | config GENERIC_IRQ_PROBE | ||
17 | def_bool y | ||
18 | |||
19 | config GENERIC_PENDING_IRQ | ||
20 | def_bool y | ||
21 | depends on GENERIC_HARDIRQS && SMP | ||
22 | |||
23 | config SEMAPHORE_SLEEPERS | ||
24 | def_bool y | ||
25 | |||
26 | config HAVE_ARCH_ALLOC_REMAP | ||
27 | def_bool y | ||
28 | |||
29 | config HAVE_SETUP_PER_CPU_AREA | ||
30 | def_bool y | ||
31 | |||
32 | config NEED_PER_CPU_PAGE_FIRST_CHUNK | ||
33 | def_bool y | ||
34 | |||
35 | config SYS_SUPPORTS_HUGETLBFS | ||
36 | def_bool y | ||
37 | |||
38 | config GENERIC_TIME | ||
39 | def_bool y | ||
40 | |||
41 | config GENERIC_CLOCKEVENTS | ||
42 | def_bool y | ||
43 | |||
44 | # FIXME: tilegx can implement a more efficient rwsem. | ||
45 | config RWSEM_GENERIC_SPINLOCK | ||
46 | def_bool y | ||
47 | |||
48 | # We have a very flat architecture from a migration point of view, | ||
49 | # so save boot time by presetting this (particularly useful on tile-sim). | ||
50 | config DEFAULT_MIGRATION_COST | ||
51 | int | ||
52 | default "10000000" | ||
53 | |||
54 | # We only support gcc 4.4 and above, so this should work. | ||
55 | config ARCH_SUPPORTS_OPTIMIZED_INLINING | ||
56 | def_bool y | ||
57 | |||
58 | config ARCH_PHYS_ADDR_T_64BIT | ||
59 | def_bool y | ||
60 | |||
61 | config LOCKDEP_SUPPORT | ||
62 | def_bool y | ||
63 | |||
64 | config STACKTRACE_SUPPORT | ||
65 | def_bool y | ||
66 | select STACKTRACE | ||
67 | |||
68 | # We use discontigmem for now; at some point we may want to switch | ||
69 | # to sparsemem (Tilera bug 7996). | ||
70 | config ARCH_DISCONTIGMEM_ENABLE | ||
71 | def_bool y | ||
72 | |||
73 | config ARCH_DISCONTIGMEM_DEFAULT | ||
74 | def_bool y | ||
75 | |||
76 | config TRACE_IRQFLAGS_SUPPORT | ||
77 | def_bool y | ||
78 | |||
79 | config STRICT_DEVMEM | ||
80 | def_bool y | ||
81 | |||
82 | # SMP is required for Tilera Linux. | ||
83 | config SMP | ||
84 | def_bool y | ||
85 | |||
86 | # Allow checking for compile-time determined overflow errors in | ||
87 | # copy_from_user(). There are still unprovable places in the | ||
88 | # generic code as of 2.6.34, so this option is not really compatible | ||
89 | # with -Werror, which is more useful in general. | ||
90 | config DEBUG_COPY_FROM_USER | ||
91 | def_bool n | ||
92 | |||
93 | config HVC_TILE | ||
94 | select HVC_DRIVER | ||
95 | def_bool y | ||
96 | |||
97 | config TILE | ||
98 | def_bool y | ||
99 | select GENERIC_FIND_FIRST_BIT | ||
100 | select GENERIC_FIND_NEXT_BIT | ||
101 | select USE_GENERIC_SMP_HELPERS | ||
102 | select CC_OPTIMIZE_FOR_SIZE | ||
103 | |||
104 | # FIXME: investigate whether we need/want these options. | ||
105 | # select HAVE_IOREMAP_PROT | ||
106 | # select HAVE_OPTPROBES | ||
107 | # select HAVE_REGS_AND_STACK_ACCESS_API | ||
108 | # select HAVE_HW_BREAKPOINT | ||
109 | # select PERF_EVENTS | ||
110 | # select HAVE_USER_RETURN_NOTIFIER | ||
111 | # config NO_BOOTMEM | ||
112 | # config ARCH_SUPPORTS_DEBUG_PAGEALLOC | ||
113 | # config HUGETLB_PAGE_SIZE_VARIABLE | ||
114 | |||
115 | |||
116 | mainmenu "Linux/TILE Kernel Configuration" | ||
117 | |||
118 | # Please note: TILE-Gx support is not yet finalized; this is | ||
119 | # the preliminary support. TILE-Gx drivers are only provided | ||
120 | # with the alpha or beta test versions for Tilera customers. | ||
121 | config TILEGX | ||
122 | depends on EXPERIMENTAL | ||
123 | bool "Building with TILE-Gx (64-bit) compiler and toolchain" | ||
124 | |||
125 | config 64BIT | ||
126 | depends on TILEGX | ||
127 | def_bool y | ||
128 | |||
129 | config ARCH_DEFCONFIG | ||
130 | string | ||
131 | default "arch/tile/configs/tile_defconfig" if !TILEGX | ||
132 | default "arch/tile/configs/tilegx_defconfig" if TILEGX | ||
133 | |||
134 | source "init/Kconfig" | ||
135 | |||
136 | menu "Tilera-specific configuration" | ||
137 | |||
138 | config NR_CPUS | ||
139 | int "Maximum number of tiles (2-255)" | ||
140 | range 2 255 | ||
141 | depends on SMP | ||
142 | default "64" | ||
143 | ---help--- | ||
144 | Building with 64 is the recommended value, but using a | ||
145 | smaller value on chips with fewer tiles slightly reduces | ||
146 | the kernel memory footprint. | ||
147 | |||
148 | source "kernel/time/Kconfig" | ||
149 | |||
150 | source "kernel/Kconfig.hz" | ||
151 | |||
152 | config KEXEC | ||
153 | bool "kexec system call" | ||
154 | ---help--- | ||
155 | kexec is a system call that implements the ability to shut down your | ||
156 | current kernel, and to start another kernel. It is like a reboot | ||
157 | but it is independent of the system firmware. It is used | ||
158 | to implement the "mboot" Tilera booter. | ||
159 | |||
160 | The name comes from the similarity to the exec system call. | ||
161 | |||
162 | config COMPAT | ||
163 | bool "Support 32-bit TILE-Gx binaries in addition to 64-bit" | ||
164 | depends on TILEGX | ||
165 | select COMPAT_BINFMT_ELF | ||
166 | default y | ||
167 | ---help--- | ||
168 | If enabled, the kernel will support running TILE-Gx binaries | ||
169 | that were built with the -m32 option. | ||
170 | |||
171 | config SYSVIPC_COMPAT | ||
172 | def_bool y | ||
173 | depends on COMPAT && SYSVIPC | ||
174 | |||
175 | # We do not currently support disabling HIGHMEM on tile64 and tilepro. | ||
176 | config HIGHMEM | ||
177 | bool # "Support for more than 512 MB of RAM" | ||
178 | default !TILEGX | ||
179 | ---help--- | ||
180 | Linux can use the full amount of RAM in the system by | ||
181 | default. However, the address space of TILE processors is | ||
182 | only 4 Gigabytes large. That means that, if you have a large | ||
183 | amount of physical memory, not all of it can be "permanently | ||
184 | mapped" by the kernel. The physical memory that's not | ||
185 | permanently mapped is called "high memory". | ||
186 | |||
187 | If you are compiling a kernel which will never run on a | ||
188 | machine with more than 512 MB total physical RAM, answer | ||
189 | "false" here. This will result in the kernel mapping all of | ||
190 | physical memory into the top 1 GB of virtual memory space. | ||
191 | |||
192 | If unsure, say "true". | ||
193 | |||
194 | # We do not currently support disabling NUMA. | ||
195 | config NUMA | ||
196 | bool # "NUMA Memory Allocation and Scheduler Support" | ||
197 | depends on SMP && DISCONTIGMEM | ||
198 | default y | ||
199 | ---help--- | ||
200 | NUMA memory allocation is required for TILE processors | ||
201 | unless booting with memory striping enabled in the | ||
202 | hypervisor, or with only a single memory controller. | ||
203 | It is recommended that this option always be enabled. | ||
204 | |||
205 | config NODES_SHIFT | ||
206 | int "Log base 2 of the max number of memory controllers" | ||
207 | default 2 | ||
208 | depends on NEED_MULTIPLE_NODES | ||
209 | ---help--- | ||
210 | By default, 2, i.e. 2^2 == 4 DDR2 controllers. | ||
211 | In a system with more controllers, this value should be raised. | ||
212 | |||
213 | # Need 16MB areas to enable hugetlb | ||
214 | # See build-time check in arch/tile/mm/init.c. | ||
215 | config FORCE_MAX_ZONEORDER | ||
216 | int | ||
217 | default 9 | ||
218 | |||
219 | choice | ||
220 | depends on !TILEGX | ||
221 | prompt "Memory split" if EMBEDDED | ||
222 | default VMSPLIT_3G | ||
223 | ---help--- | ||
224 | Select the desired split between kernel and user memory. | ||
225 | |||
226 | If the address range available to the kernel is less than the | ||
227 | physical memory installed, the remaining memory will be available | ||
228 | as "high memory". Accessing high memory is a little more costly | ||
229 | than low memory, as it needs to be mapped into the kernel first. | ||
230 | Note that increasing the kernel address space limits the range | ||
231 | available to user programs, making the address space there | ||
232 | tighter. Selecting anything other than the default 3G/1G split | ||
233 | will also likely make your kernel incompatible with binary-only | ||
234 | kernel modules. | ||
235 | |||
236 | If you are not absolutely sure what you are doing, leave this | ||
237 | option alone! | ||
238 | |||
239 | config VMSPLIT_375G | ||
240 | bool "3.75G/0.25G user/kernel split (no kernel networking)" | ||
241 | config VMSPLIT_35G | ||
242 | bool "3.5G/0.5G user/kernel split" | ||
243 | config VMSPLIT_3G | ||
244 | bool "3G/1G user/kernel split" | ||
245 | config VMSPLIT_3G_OPT | ||
246 | bool "3G/1G user/kernel split (for full 1G low memory)" | ||
247 | config VMSPLIT_2G | ||
248 | bool "2G/2G user/kernel split" | ||
249 | config VMSPLIT_1G | ||
250 | bool "1G/3G user/kernel split" | ||
251 | endchoice | ||
252 | |||
253 | config PAGE_OFFSET | ||
254 | hex | ||
255 | default 0xF0000000 if VMSPLIT_375G | ||
256 | default 0xE0000000 if VMSPLIT_35G | ||
257 | default 0xB0000000 if VMSPLIT_3G_OPT | ||
258 | default 0x80000000 if VMSPLIT_2G | ||
259 | default 0x40000000 if VMSPLIT_1G | ||
260 | default 0xC0000000 | ||
261 | |||
262 | source "mm/Kconfig" | ||
263 | |||
264 | config CMDLINE_BOOL | ||
265 | bool "Built-in kernel command line" | ||
266 | default n | ||
267 | ---help--- | ||
268 | Allow for specifying boot arguments to the kernel at | ||
269 | build time. On some systems (e.g. embedded ones), it is | ||
270 | necessary or convenient to provide some or all of the | ||
271 | kernel boot arguments with the kernel itself (that is, | ||
272 | to not rely on the boot loader to provide them.) | ||
273 | |||
274 | To compile command line arguments into the kernel, | ||
275 | set this option to 'Y', then fill in | ||
276 | the boot arguments in CONFIG_CMDLINE. | ||
277 | |||
278 | Systems with fully functional boot loaders (e.g. mboot, or | ||
279 | if booting over PCI) should leave this option set to 'N'. | ||
280 | |||
281 | config CMDLINE | ||
282 | string "Built-in kernel command string" | ||
283 | depends on CMDLINE_BOOL | ||
284 | default "" | ||
285 | ---help--- | ||
286 | Enter arguments here that should be compiled into the kernel | ||
287 | image and used at boot time. If the boot loader provides a | ||
288 | command line at boot time, it is appended to this string to | ||
289 | form the full kernel command line, when the system boots. | ||
290 | |||
291 | However, you can use the CONFIG_CMDLINE_OVERRIDE option to | ||
292 | change this behavior. | ||
293 | |||
294 | In most cases, the command line (whether built-in or provided | ||
295 | by the boot loader) should specify the device for the root | ||
296 | file system. | ||
297 | |||
298 | config CMDLINE_OVERRIDE | ||
299 | bool "Built-in command line overrides boot loader arguments" | ||
300 | default n | ||
301 | depends on CMDLINE_BOOL | ||
302 | ---help--- | ||
303 | Set this option to 'Y' to have the kernel ignore the boot loader | ||
304 | command line, and use ONLY the built-in command line. | ||
305 | |||
306 | This is used to work around broken boot loaders. This should | ||
307 | be set to 'N' under normal conditions. | ||
308 | |||
309 | config VMALLOC_RESERVE | ||
310 | hex | ||
311 | default 0x1000000 | ||
312 | |||
313 | endmenu # Tilera-specific configuration | ||
314 | |||
315 | menu "Bus options" | ||
316 | |||
317 | config NO_IOMEM | ||
318 | def_bool !PCI | ||
319 | |||
320 | config NO_IOPORT | ||
321 | def_bool !PCI | ||
322 | |||
323 | source "drivers/pci/Kconfig" | ||
324 | |||
325 | source "drivers/pci/hotplug/Kconfig" | ||
326 | |||
327 | endmenu | ||
328 | |||
329 | menu "Executable file formats" | ||
330 | |||
331 | # only elf supported | ||
332 | config KCORE_ELF | ||
333 | def_bool y | ||
334 | depends on PROC_FS | ||
335 | |||
336 | source "fs/Kconfig.binfmt" | ||
337 | |||
338 | endmenu | ||
339 | |||
340 | source "net/Kconfig" | ||
341 | |||
342 | source "drivers/Kconfig" | ||
343 | |||
344 | source "fs/Kconfig" | ||
345 | |||
346 | source "arch/tile/Kconfig.debug" | ||
347 | |||
348 | source "security/Kconfig" | ||
349 | |||
350 | source "crypto/Kconfig" | ||
351 | |||
352 | source "lib/Kconfig" | ||
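For reference, a built-in command line as described by the CMDLINE_BOOL / CMDLINE / CMDLINE_OVERRIDE options above would appear as follows in a generated .config; the root device and console values here are illustrative only and are not taken from this commit:

    CONFIG_CMDLINE_BOOL=y
    CONFIG_CMDLINE="root=/dev/sda1 console=hvc0"
    # CONFIG_CMDLINE_OVERRIDE is not set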
diff --git a/arch/tile/Kconfig.debug b/arch/tile/Kconfig.debug
new file mode 100644
index 000000000000..a81f0fbf7e60
--- /dev/null
+++ b/arch/tile/Kconfig.debug
@@ -0,0 +1,43 @@
1 | menu "Kernel hacking" | ||
2 | |||
3 | source "lib/Kconfig.debug" | ||
4 | |||
5 | config EARLY_PRINTK | ||
6 | bool "Early printk" if EMBEDDED && DEBUG_KERNEL | ||
7 | default y | ||
8 | help | ||
9 | Write kernel log output directly via the hypervisor console. | ||
10 | |||
11 | This is useful for kernel debugging when your machine crashes very | ||
12 | early before the console code is initialized. For normal operation | ||
13 | it is not recommended because it looks ugly and doesn't cooperate | ||
14 | with klogd/syslogd. You should normally say N here, | ||
15 | unless you want to debug such a crash. | ||
16 | |||
17 | config DEBUG_STACKOVERFLOW | ||
18 | bool "Check for stack overflows" | ||
19 | depends on DEBUG_KERNEL | ||
20 | help | ||
21 | This option will cause messages to be printed if free stack space | ||
22 | drops below a certain limit. | ||
23 | |||
24 | config DEBUG_STACK_USAGE | ||
25 | bool "Stack utilization instrumentation" | ||
26 | depends on DEBUG_KERNEL | ||
27 | help | ||
28 | Enables the display of the minimum amount of free stack which each | ||
29 | task has ever had available in the sysrq-T and sysrq-P debug output. | ||
30 | |||
31 | This option will slow down process creation somewhat. | ||
32 | |||
33 | config DEBUG_EXTRA_FLAGS | ||
34 | string "Additional compiler arguments when building with '-g'" | ||
35 | depends on DEBUG_INFO | ||
36 | default "" | ||
37 | help | ||
38 | Debug info can be large, and flags like | ||
39 | `-femit-struct-debug-baseonly' can reduce the kernel file | ||
40 | size and build time noticeably. Such flags are often | ||
41 | helpful if the main use of debug info is line number info. | ||
42 | |||
43 | endmenu | ||
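As a concrete illustration of DEBUG_EXTRA_FLAGS, the tile_defconfig below enables it together with DEBUG_INFO, and arch/tile/Makefile appends the string to KBUILD_CFLAGS:

    CONFIG_DEBUG_INFO=y
    CONFIG_DEBUG_EXTRA_FLAGS="-femit-struct-debug-baseonly"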
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
new file mode 100644
index 000000000000..07c4318c0629
--- /dev/null
+++ b/arch/tile/Makefile
@@ -0,0 +1,52 @@
1 | # | ||
2 | # This file is subject to the terms and conditions of the GNU General Public | ||
3 | # License. See the file "COPYING" in the main directory of this archive | ||
4 | # for more details. | ||
5 | # | ||
6 | # This file is included by the global makefile so that you can add your own | ||
7 | # architecture-specific flags and dependencies. Remember to have actions | ||
8 | # for "archclean" and "archdep" for cleaning up and making dependencies for | ||
9 | # this architecture. | ||
10 | |||
11 | ifeq ($(CROSS_COMPILE),) | ||
12 | # If building with TILERA_ROOT set (i.e. using the Tilera Multicore | ||
13 | # Development Environment) we can set CROSS_COMPILE based on that. | ||
14 | ifdef TILERA_ROOT | ||
15 | CROSS_COMPILE = $(TILERA_ROOT)/bin/tile- | ||
16 | endif | ||
17 | endif | ||
18 | |||
19 | # If we're not cross-compiling, make sure we're on the right architecture. | ||
20 | ifeq ($(CROSS_COMPILE),) | ||
21 | HOST_ARCH = $(shell uname -m) | ||
22 | ifneq ($(HOST_ARCH),$(ARCH)) | ||
23 | $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH)) | ||
24 | endif | ||
25 | endif | ||
26 | |||
27 | |||
28 | KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS) | ||
29 | |||
30 | LIBGCC_PATH := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) | ||
31 | |||
32 | # Provide the path to use for "make defconfig". | ||
33 | KBUILD_DEFCONFIG := $(ARCH)_defconfig | ||
34 | |||
35 | # Used as a file extension when useful, e.g. head_$(BITS).o | ||
36 | # Not needed for (e.g.) "$(CC) -m32" since the compiler automatically | ||
37 | # uses the right default anyway. | ||
38 | export BITS | ||
39 | ifeq ($(CONFIG_TILEGX),y) | ||
40 | BITS := 64 | ||
41 | else | ||
42 | BITS := 32 | ||
43 | endif | ||
44 | |||
45 | head-y := arch/tile/kernel/head_$(BITS).o | ||
46 | |||
47 | libs-y += arch/tile/lib/ | ||
48 | libs-y += $(LIBGCC_PATH) | ||
49 | |||
50 | |||
51 | # See arch/tile/Kbuild for content of core part of the kernel | ||
52 | core-y += arch/tile/ | ||
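A typical cross-build driven by the Makefile hooks above might look like the following; the TILERA_ROOT path is illustrative and not part of this commit:

    # CROSS_COMPILE is derived from TILERA_ROOT ($(TILERA_ROOT)/bin/tile-)
    make ARCH=tile TILERA_ROOT=/opt/tilera/TileraMDE tile_defconfig
    make ARCH=tile TILERA_ROOT=/opt/tilera/TileraMDE -j8
    # Alternatively, pass an explicit cross prefix:
    make ARCH=tile CROSS_COMPILE=tile- -j8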
diff --git a/arch/tile/configs/tile_defconfig b/arch/tile/configs/tile_defconfig
new file mode 100644
index 000000000000..74a5be39e8f2
--- /dev/null
+++ b/arch/tile/configs/tile_defconfig
@@ -0,0 +1,1289 @@
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.34 | ||
4 | # Fri May 28 17:51:43 2010 | ||
5 | # | ||
6 | CONFIG_MMU=y | ||
7 | CONFIG_GENERIC_CSUM=y | ||
8 | CONFIG_GENERIC_HARDIRQS=y | ||
9 | CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y | ||
10 | CONFIG_GENERIC_IRQ_PROBE=y | ||
11 | CONFIG_GENERIC_PENDING_IRQ=y | ||
12 | CONFIG_ZONE_DMA=y | ||
13 | CONFIG_SEMAPHORE_SLEEPERS=y | ||
14 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | ||
15 | CONFIG_HAVE_ARCH_ALLOC_REMAP=y | ||
16 | CONFIG_HAVE_SETUP_PER_CPU_AREA=y | ||
17 | CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y | ||
18 | CONFIG_SYS_SUPPORTS_HUGETLBFS=y | ||
19 | CONFIG_GENERIC_TIME=y | ||
20 | CONFIG_GENERIC_CLOCKEVENTS=y | ||
21 | CONFIG_CLOCKSOURCE_WATCHDOG=y | ||
22 | CONFIG_RWSEM_GENERIC_SPINLOCK=y | ||
23 | CONFIG_DEFAULT_MIGRATION_COST=10000000 | ||
24 | CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y | ||
25 | CONFIG_ARCH_PHYS_ADDR_T_64BIT=y | ||
26 | CONFIG_LOCKDEP_SUPPORT=y | ||
27 | CONFIG_STACKTRACE_SUPPORT=y | ||
28 | CONFIG_ARCH_DISCONTIGMEM_ENABLE=y | ||
29 | CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y | ||
30 | CONFIG_TRACE_IRQFLAGS_SUPPORT=y | ||
31 | CONFIG_STRICT_DEVMEM=y | ||
32 | CONFIG_SMP=y | ||
33 | CONFIG_WERROR=y | ||
34 | # CONFIG_DEBUG_COPY_FROM_USER is not set | ||
35 | CONFIG_SERIAL_CONSOLE=y | ||
36 | CONFIG_HVC_TILE=y | ||
37 | CONFIG_TILE=y | ||
38 | # CONFIG_TILEGX is not set | ||
39 | CONFIG_ARCH_DEFCONFIG="arch/tile/configs/tile_defconfig" | ||
40 | CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" | ||
41 | CONFIG_CONSTRUCTORS=y | ||
42 | |||
43 | # | ||
44 | # General setup | ||
45 | # | ||
46 | CONFIG_EXPERIMENTAL=y | ||
47 | CONFIG_LOCK_KERNEL=y | ||
48 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
49 | CONFIG_LOCALVERSION="" | ||
50 | CONFIG_LOCALVERSION_AUTO=y | ||
51 | # CONFIG_SWAP is not set | ||
52 | CONFIG_SYSVIPC=y | ||
53 | CONFIG_SYSVIPC_SYSCTL=y | ||
54 | # CONFIG_POSIX_MQUEUE is not set | ||
55 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
56 | # CONFIG_TASKSTATS is not set | ||
57 | # CONFIG_AUDIT is not set | ||
58 | |||
59 | # | ||
60 | # RCU Subsystem | ||
61 | # | ||
62 | CONFIG_TREE_RCU=y | ||
63 | # CONFIG_TREE_PREEMPT_RCU is not set | ||
64 | # CONFIG_TINY_RCU is not set | ||
65 | # CONFIG_RCU_TRACE is not set | ||
66 | CONFIG_RCU_FANOUT=32 | ||
67 | # CONFIG_RCU_FANOUT_EXACT is not set | ||
68 | # CONFIG_RCU_FAST_NO_HZ is not set | ||
69 | # CONFIG_TREE_RCU_TRACE is not set | ||
70 | # CONFIG_IKCONFIG is not set | ||
71 | CONFIG_LOG_BUF_SHIFT=17 | ||
72 | # CONFIG_CGROUPS is not set | ||
73 | # CONFIG_SYSFS_DEPRECATED_V2 is not set | ||
74 | # CONFIG_RELAY is not set | ||
75 | # CONFIG_NAMESPACES is not set | ||
76 | CONFIG_BLK_DEV_INITRD=y | ||
77 | CONFIG_INITRAMFS_SOURCE="usr/contents.txt" | ||
78 | CONFIG_INITRAMFS_ROOT_UID=0 | ||
79 | CONFIG_INITRAMFS_ROOT_GID=0 | ||
80 | CONFIG_RD_GZIP=y | ||
81 | # CONFIG_RD_BZIP2 is not set | ||
82 | # CONFIG_RD_LZMA is not set | ||
83 | # CONFIG_RD_LZO is not set | ||
84 | CONFIG_INITRAMFS_COMPRESSION_NONE=y | ||
85 | # CONFIG_INITRAMFS_COMPRESSION_GZIP is not set | ||
86 | # CONFIG_INITRAMFS_COMPRESSION_BZIP2 is not set | ||
87 | # CONFIG_INITRAMFS_COMPRESSION_LZMA is not set | ||
88 | # CONFIG_INITRAMFS_COMPRESSION_LZO is not set | ||
89 | CONFIG_SYSCTL=y | ||
90 | CONFIG_ANON_INODES=y | ||
91 | CONFIG_EMBEDDED=y | ||
92 | CONFIG_SYSCTL_SYSCALL=y | ||
93 | CONFIG_KALLSYMS=y | ||
94 | # CONFIG_KALLSYMS_ALL is not set | ||
95 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
96 | CONFIG_HOTPLUG=y | ||
97 | CONFIG_PRINTK=y | ||
98 | CONFIG_BUG=y | ||
99 | CONFIG_ELF_CORE=y | ||
100 | CONFIG_BASE_FULL=y | ||
101 | CONFIG_FUTEX=y | ||
102 | CONFIG_EPOLL=y | ||
103 | CONFIG_SIGNALFD=y | ||
104 | CONFIG_TIMERFD=y | ||
105 | CONFIG_EVENTFD=y | ||
106 | CONFIG_SHMEM=y | ||
107 | CONFIG_AIO=y | ||
108 | |||
109 | # | ||
110 | # Kernel Performance Events And Counters | ||
111 | # | ||
112 | CONFIG_VM_EVENT_COUNTERS=y | ||
113 | CONFIG_PCI_QUIRKS=y | ||
114 | CONFIG_SLUB_DEBUG=y | ||
115 | # CONFIG_COMPAT_BRK is not set | ||
116 | # CONFIG_SLAB is not set | ||
117 | CONFIG_SLUB=y | ||
118 | # CONFIG_SLOB is not set | ||
119 | CONFIG_PROFILING=y | ||
120 | CONFIG_OPROFILE=y | ||
121 | CONFIG_HAVE_OPROFILE=y | ||
122 | CONFIG_USE_GENERIC_SMP_HELPERS=y | ||
123 | |||
124 | # | ||
125 | # GCOV-based kernel profiling | ||
126 | # | ||
127 | # CONFIG_SLOW_WORK is not set | ||
128 | # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set | ||
129 | CONFIG_SLABINFO=y | ||
130 | CONFIG_RT_MUTEXES=y | ||
131 | CONFIG_BASE_SMALL=0 | ||
132 | CONFIG_MODULES=y | ||
133 | # CONFIG_MODULE_FORCE_LOAD is not set | ||
134 | CONFIG_MODULE_UNLOAD=y | ||
135 | # CONFIG_MODULE_FORCE_UNLOAD is not set | ||
136 | # CONFIG_MODVERSIONS is not set | ||
137 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
138 | CONFIG_STOP_MACHINE=y | ||
139 | CONFIG_BLOCK=y | ||
140 | CONFIG_LBDAF=y | ||
141 | # CONFIG_BLK_DEV_BSG is not set | ||
142 | # CONFIG_BLK_DEV_INTEGRITY is not set | ||
143 | |||
144 | # | ||
145 | # IO Schedulers | ||
146 | # | ||
147 | CONFIG_IOSCHED_NOOP=y | ||
148 | # CONFIG_IOSCHED_DEADLINE is not set | ||
149 | # CONFIG_IOSCHED_CFQ is not set | ||
150 | # CONFIG_DEFAULT_DEADLINE is not set | ||
151 | # CONFIG_DEFAULT_CFQ is not set | ||
152 | CONFIG_DEFAULT_NOOP=y | ||
153 | CONFIG_DEFAULT_IOSCHED="noop" | ||
154 | # CONFIG_INLINE_SPIN_TRYLOCK is not set | ||
155 | # CONFIG_INLINE_SPIN_TRYLOCK_BH is not set | ||
156 | # CONFIG_INLINE_SPIN_LOCK is not set | ||
157 | # CONFIG_INLINE_SPIN_LOCK_BH is not set | ||
158 | # CONFIG_INLINE_SPIN_LOCK_IRQ is not set | ||
159 | # CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set | ||
160 | CONFIG_INLINE_SPIN_UNLOCK=y | ||
161 | # CONFIG_INLINE_SPIN_UNLOCK_BH is not set | ||
162 | CONFIG_INLINE_SPIN_UNLOCK_IRQ=y | ||
163 | # CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set | ||
164 | # CONFIG_INLINE_READ_TRYLOCK is not set | ||
165 | # CONFIG_INLINE_READ_LOCK is not set | ||
166 | # CONFIG_INLINE_READ_LOCK_BH is not set | ||
167 | # CONFIG_INLINE_READ_LOCK_IRQ is not set | ||
168 | # CONFIG_INLINE_READ_LOCK_IRQSAVE is not set | ||
169 | CONFIG_INLINE_READ_UNLOCK=y | ||
170 | # CONFIG_INLINE_READ_UNLOCK_BH is not set | ||
171 | CONFIG_INLINE_READ_UNLOCK_IRQ=y | ||
172 | # CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set | ||
173 | # CONFIG_INLINE_WRITE_TRYLOCK is not set | ||
174 | # CONFIG_INLINE_WRITE_LOCK is not set | ||
175 | # CONFIG_INLINE_WRITE_LOCK_BH is not set | ||
176 | # CONFIG_INLINE_WRITE_LOCK_IRQ is not set | ||
177 | # CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set | ||
178 | CONFIG_INLINE_WRITE_UNLOCK=y | ||
179 | # CONFIG_INLINE_WRITE_UNLOCK_BH is not set | ||
180 | CONFIG_INLINE_WRITE_UNLOCK_IRQ=y | ||
181 | # CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set | ||
182 | CONFIG_MUTEX_SPIN_ON_OWNER=y | ||
183 | |||
184 | # | ||
185 | # Tilera-specific configuration | ||
186 | # | ||
187 | CONFIG_NR_CPUS=64 | ||
188 | CONFIG_HOMECACHE=y | ||
189 | CONFIG_DATAPLANE=y | ||
190 | CONFIG_TICK_ONESHOT=y | ||
191 | CONFIG_NO_HZ=y | ||
192 | CONFIG_HIGH_RES_TIMERS=y | ||
193 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | ||
194 | CONFIG_HZ_100=y | ||
195 | # CONFIG_HZ_250 is not set | ||
196 | # CONFIG_HZ_300 is not set | ||
197 | # CONFIG_HZ_1000 is not set | ||
198 | CONFIG_HZ=100 | ||
199 | CONFIG_SCHED_HRTICK=y | ||
200 | # CONFIG_KEXEC is not set | ||
201 | CONFIG_HIGHMEM=y | ||
202 | CONFIG_NUMA=y | ||
203 | CONFIG_NODES_SHIFT=2 | ||
204 | CONFIG_FORCE_MAX_ZONEORDER=9 | ||
205 | # CONFIG_VMSPLIT_375G is not set | ||
206 | # CONFIG_VMSPLIT_35G is not set | ||
207 | CONFIG_VMSPLIT_3G=y | ||
208 | # CONFIG_VMSPLIT_3G_OPT is not set | ||
209 | # CONFIG_VMSPLIT_2G is not set | ||
210 | # CONFIG_VMSPLIT_1G is not set | ||
211 | CONFIG_PAGE_OFFSET=0xC0000000 | ||
212 | CONFIG_SELECT_MEMORY_MODEL=y | ||
213 | # CONFIG_FLATMEM_MANUAL is not set | ||
214 | CONFIG_DISCONTIGMEM_MANUAL=y | ||
215 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
216 | CONFIG_DISCONTIGMEM=y | ||
217 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
218 | CONFIG_NEED_MULTIPLE_NODES=y | ||
219 | CONFIG_PAGEFLAGS_EXTENDED=y | ||
220 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
221 | CONFIG_MIGRATION=y | ||
222 | CONFIG_PHYS_ADDR_T_64BIT=y | ||
223 | CONFIG_ZONE_DMA_FLAG=1 | ||
224 | CONFIG_BOUNCE=y | ||
225 | CONFIG_VIRT_TO_BUS=y | ||
226 | # CONFIG_KSM is not set | ||
227 | CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 | ||
228 | # CONFIG_CMDLINE_BOOL is not set | ||
229 | # CONFIG_FEEDBACK_COLLECT is not set | ||
230 | CONFIG_FEEDBACK_USE="" | ||
231 | # CONFIG_HUGEVMAP is not set | ||
232 | CONFIG_VMALLOC_RESERVE=0x1000000 | ||
233 | CONFIG_HARDWALL=y | ||
234 | CONFIG_MEMPROF=y | ||
235 | CONFIG_XGBE_MAIN=y | ||
236 | CONFIG_NET_TILE=y | ||
237 | CONFIG_PSEUDO_NAPI=y | ||
238 | CONFIG_TILEPCI_ENDP=y | ||
239 | CONFIG_TILE_IDE_GPIO=y | ||
240 | CONFIG_TILE_SOFTUART=y | ||
241 | |||
242 | # | ||
243 | # Bus options | ||
244 | # | ||
245 | CONFIG_PCI=y | ||
246 | CONFIG_PCI_DOMAINS=y | ||
247 | # CONFIG_ARCH_SUPPORTS_MSI is not set | ||
248 | CONFIG_PCI_DEBUG=y | ||
249 | # CONFIG_PCI_STUB is not set | ||
250 | # CONFIG_PCI_IOV is not set | ||
251 | # CONFIG_HOTPLUG_PCI is not set | ||
252 | |||
253 | # | ||
254 | # Executable file formats | ||
255 | # | ||
256 | CONFIG_KCORE_ELF=y | ||
257 | CONFIG_BINFMT_ELF=y | ||
258 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
259 | # CONFIG_HAVE_AOUT is not set | ||
260 | # CONFIG_BINFMT_MISC is not set | ||
261 | CONFIG_NET=y | ||
262 | |||
263 | # | ||
264 | # Networking options | ||
265 | # | ||
266 | CONFIG_PACKET=y | ||
267 | CONFIG_UNIX=y | ||
268 | CONFIG_XFRM=y | ||
269 | # CONFIG_XFRM_USER is not set | ||
270 | # CONFIG_XFRM_SUB_POLICY is not set | ||
271 | # CONFIG_XFRM_MIGRATE is not set | ||
272 | # CONFIG_XFRM_STATISTICS is not set | ||
273 | # CONFIG_NET_KEY is not set | ||
274 | CONFIG_INET=y | ||
275 | CONFIG_IP_MULTICAST=y | ||
276 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
277 | CONFIG_IP_FIB_HASH=y | ||
278 | # CONFIG_IP_PNP is not set | ||
279 | # CONFIG_NET_IPIP is not set | ||
280 | # CONFIG_NET_IPGRE is not set | ||
281 | # CONFIG_IP_MROUTE is not set | ||
282 | # CONFIG_ARPD is not set | ||
283 | # CONFIG_SYN_COOKIES is not set | ||
284 | # CONFIG_INET_AH is not set | ||
285 | # CONFIG_INET_ESP is not set | ||
286 | # CONFIG_INET_IPCOMP is not set | ||
287 | # CONFIG_INET_XFRM_TUNNEL is not set | ||
288 | CONFIG_INET_TUNNEL=y | ||
289 | # CONFIG_INET_XFRM_MODE_TRANSPORT is not set | ||
290 | # CONFIG_INET_XFRM_MODE_TUNNEL is not set | ||
291 | CONFIG_INET_XFRM_MODE_BEET=y | ||
292 | # CONFIG_INET_LRO is not set | ||
293 | # CONFIG_INET_DIAG is not set | ||
294 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
295 | CONFIG_TCP_CONG_CUBIC=y | ||
296 | CONFIG_DEFAULT_TCP_CONG="cubic" | ||
297 | # CONFIG_TCP_MD5SIG is not set | ||
298 | CONFIG_IPV6=y | ||
299 | # CONFIG_IPV6_PRIVACY is not set | ||
300 | # CONFIG_IPV6_ROUTER_PREF is not set | ||
301 | # CONFIG_IPV6_OPTIMISTIC_DAD is not set | ||
302 | # CONFIG_INET6_AH is not set | ||
303 | # CONFIG_INET6_ESP is not set | ||
304 | # CONFIG_INET6_IPCOMP is not set | ||
305 | # CONFIG_IPV6_MIP6 is not set | ||
306 | # CONFIG_INET6_XFRM_TUNNEL is not set | ||
307 | # CONFIG_INET6_TUNNEL is not set | ||
308 | CONFIG_INET6_XFRM_MODE_TRANSPORT=y | ||
309 | CONFIG_INET6_XFRM_MODE_TUNNEL=y | ||
310 | CONFIG_INET6_XFRM_MODE_BEET=y | ||
311 | # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set | ||
312 | CONFIG_IPV6_SIT=y | ||
313 | # CONFIG_IPV6_SIT_6RD is not set | ||
314 | CONFIG_IPV6_NDISC_NODETYPE=y | ||
315 | # CONFIG_IPV6_TUNNEL is not set | ||
316 | # CONFIG_IPV6_MULTIPLE_TABLES is not set | ||
317 | # CONFIG_IPV6_MROUTE is not set | ||
318 | # CONFIG_NETWORK_SECMARK is not set | ||
319 | # CONFIG_NETFILTER is not set | ||
320 | # CONFIG_IP_DCCP is not set | ||
321 | # CONFIG_IP_SCTP is not set | ||
322 | # CONFIG_RDS is not set | ||
323 | # CONFIG_TIPC is not set | ||
324 | # CONFIG_ATM is not set | ||
325 | # CONFIG_BRIDGE is not set | ||
326 | # CONFIG_NET_DSA is not set | ||
327 | # CONFIG_VLAN_8021Q is not set | ||
328 | # CONFIG_DECNET is not set | ||
329 | # CONFIG_LLC2 is not set | ||
330 | # CONFIG_IPX is not set | ||
331 | # CONFIG_ATALK is not set | ||
332 | # CONFIG_X25 is not set | ||
333 | # CONFIG_LAPB is not set | ||
334 | # CONFIG_ECONET is not set | ||
335 | # CONFIG_WAN_ROUTER is not set | ||
336 | # CONFIG_PHONET is not set | ||
337 | # CONFIG_IEEE802154 is not set | ||
338 | # CONFIG_NET_SCHED is not set | ||
339 | # CONFIG_DCB is not set | ||
340 | |||
341 | # | ||
342 | # Network testing | ||
343 | # | ||
344 | # CONFIG_NET_PKTGEN is not set | ||
345 | # CONFIG_HAMRADIO is not set | ||
346 | # CONFIG_CAN is not set | ||
347 | # CONFIG_IRDA is not set | ||
348 | # CONFIG_BT is not set | ||
349 | # CONFIG_AF_RXRPC is not set | ||
350 | # CONFIG_WIRELESS is not set | ||
351 | # CONFIG_WIMAX is not set | ||
352 | # CONFIG_RFKILL is not set | ||
353 | # CONFIG_NET_9P is not set | ||
354 | |||
355 | # | ||
356 | # Device Drivers | ||
357 | # | ||
358 | |||
359 | # | ||
360 | # Generic Driver Options | ||
361 | # | ||
362 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
363 | # CONFIG_DEVTMPFS is not set | ||
364 | CONFIG_STANDALONE=y | ||
365 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
366 | CONFIG_FW_LOADER=y | ||
367 | CONFIG_FIRMWARE_IN_KERNEL=y | ||
368 | CONFIG_EXTRA_FIRMWARE="" | ||
369 | # CONFIG_DEBUG_DRIVER is not set | ||
370 | # CONFIG_DEBUG_DEVRES is not set | ||
371 | # CONFIG_SYS_HYPERVISOR is not set | ||
372 | # CONFIG_CONNECTOR is not set | ||
373 | # CONFIG_MTD is not set | ||
374 | # CONFIG_PARPORT is not set | ||
375 | CONFIG_BLK_DEV=y | ||
376 | # CONFIG_BLK_CPQ_DA is not set | ||
377 | # CONFIG_BLK_CPQ_CISS_DA is not set | ||
378 | # CONFIG_BLK_DEV_DAC960 is not set | ||
379 | # CONFIG_BLK_DEV_UMEM is not set | ||
380 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
381 | # CONFIG_BLK_DEV_LOOP is not set | ||
382 | |||
383 | # | ||
384 | # DRBD disabled because PROC_FS, INET or CONNECTOR not selected | ||
385 | # | ||
386 | # CONFIG_BLK_DEV_NBD is not set | ||
387 | # CONFIG_BLK_DEV_SX8 is not set | ||
388 | # CONFIG_BLK_DEV_RAM is not set | ||
389 | # CONFIG_CDROM_PKTCDVD is not set | ||
390 | # CONFIG_ATA_OVER_ETH is not set | ||
391 | # CONFIG_BLK_DEV_HD is not set | ||
392 | CONFIG_MISC_DEVICES=y | ||
393 | # CONFIG_AD525X_DPOT is not set | ||
394 | # CONFIG_PHANTOM is not set | ||
395 | # CONFIG_SGI_IOC4 is not set | ||
396 | # CONFIG_TIFM_CORE is not set | ||
397 | # CONFIG_ICS932S401 is not set | ||
398 | # CONFIG_ENCLOSURE_SERVICES is not set | ||
399 | # CONFIG_HP_ILO is not set | ||
400 | # CONFIG_ISL29003 is not set | ||
401 | # CONFIG_SENSORS_TSL2550 is not set | ||
402 | # CONFIG_DS1682 is not set | ||
403 | # CONFIG_C2PORT is not set | ||
404 | |||
405 | # | ||
406 | # EEPROM support | ||
407 | # | ||
408 | # CONFIG_EEPROM_AT24 is not set | ||
409 | # CONFIG_EEPROM_LEGACY is not set | ||
410 | # CONFIG_EEPROM_MAX6875 is not set | ||
411 | # CONFIG_EEPROM_93CX6 is not set | ||
412 | # CONFIG_CB710_CORE is not set | ||
413 | CONFIG_HAVE_IDE=y | ||
414 | CONFIG_IDE=y | ||
415 | |||
416 | # | ||
417 | # Please see Documentation/ide/ide.txt for help/info on IDE drives | ||
418 | # | ||
419 | # CONFIG_BLK_DEV_IDE_SATA is not set | ||
420 | CONFIG_IDE_GD=y | ||
421 | CONFIG_IDE_GD_ATA=y | ||
422 | # CONFIG_IDE_GD_ATAPI is not set | ||
423 | # CONFIG_BLK_DEV_IDECD is not set | ||
424 | # CONFIG_BLK_DEV_IDETAPE is not set | ||
425 | # CONFIG_IDE_TASK_IOCTL is not set | ||
426 | CONFIG_IDE_PROC_FS=y | ||
427 | |||
428 | # | ||
429 | # IDE chipset support/bugfixes | ||
430 | # | ||
431 | # CONFIG_BLK_DEV_PLATFORM is not set | ||
432 | |||
433 | # | ||
434 | # PCI IDE chipsets support | ||
435 | # | ||
436 | # CONFIG_BLK_DEV_GENERIC is not set | ||
437 | # CONFIG_BLK_DEV_OPTI621 is not set | ||
438 | # CONFIG_BLK_DEV_AEC62XX is not set | ||
439 | # CONFIG_BLK_DEV_ALI15X3 is not set | ||
440 | # CONFIG_BLK_DEV_AMD74XX is not set | ||
441 | # CONFIG_BLK_DEV_CMD64X is not set | ||
442 | # CONFIG_BLK_DEV_TRIFLEX is not set | ||
443 | # CONFIG_BLK_DEV_CS5520 is not set | ||
444 | # CONFIG_BLK_DEV_CS5530 is not set | ||
445 | # CONFIG_BLK_DEV_HPT366 is not set | ||
446 | # CONFIG_BLK_DEV_JMICRON is not set | ||
447 | # CONFIG_BLK_DEV_SC1200 is not set | ||
448 | # CONFIG_BLK_DEV_PIIX is not set | ||
449 | # CONFIG_BLK_DEV_IT8172 is not set | ||
450 | # CONFIG_BLK_DEV_IT8213 is not set | ||
451 | # CONFIG_BLK_DEV_IT821X is not set | ||
452 | # CONFIG_BLK_DEV_NS87415 is not set | ||
453 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set | ||
454 | # CONFIG_BLK_DEV_PDC202XX_NEW is not set | ||
455 | # CONFIG_BLK_DEV_SVWKS is not set | ||
456 | # CONFIG_BLK_DEV_SIIMAGE is not set | ||
457 | # CONFIG_BLK_DEV_SLC90E66 is not set | ||
458 | # CONFIG_BLK_DEV_TRM290 is not set | ||
459 | # CONFIG_BLK_DEV_VIA82CXXX is not set | ||
460 | # CONFIG_BLK_DEV_TC86C001 is not set | ||
461 | # CONFIG_BLK_DEV_IDEDMA is not set | ||
462 | |||
463 | # | ||
464 | # SCSI device support | ||
465 | # | ||
466 | CONFIG_SCSI_MOD=y | ||
467 | # CONFIG_RAID_ATTRS is not set | ||
468 | CONFIG_SCSI=y | ||
469 | CONFIG_SCSI_DMA=y | ||
470 | # CONFIG_SCSI_TGT is not set | ||
471 | # CONFIG_SCSI_NETLINK is not set | ||
472 | CONFIG_SCSI_PROC_FS=y | ||
473 | |||
474 | # | ||
475 | # SCSI support type (disk, tape, CD-ROM) | ||
476 | # | ||
477 | CONFIG_BLK_DEV_SD=y | ||
478 | # CONFIG_CHR_DEV_ST is not set | ||
479 | # CONFIG_CHR_DEV_OSST is not set | ||
480 | # CONFIG_BLK_DEV_SR is not set | ||
481 | # CONFIG_CHR_DEV_SG is not set | ||
482 | # CONFIG_CHR_DEV_SCH is not set | ||
483 | # CONFIG_SCSI_MULTI_LUN is not set | ||
484 | CONFIG_SCSI_CONSTANTS=y | ||
485 | CONFIG_SCSI_LOGGING=y | ||
486 | # CONFIG_SCSI_SCAN_ASYNC is not set | ||
487 | CONFIG_SCSI_WAIT_SCAN=m | ||
488 | |||
489 | # | ||
490 | # SCSI Transports | ||
491 | # | ||
492 | # CONFIG_SCSI_SPI_ATTRS is not set | ||
493 | # CONFIG_SCSI_FC_ATTRS is not set | ||
494 | # CONFIG_SCSI_ISCSI_ATTRS is not set | ||
495 | # CONFIG_SCSI_SAS_LIBSAS is not set | ||
496 | # CONFIG_SCSI_SRP_ATTRS is not set | ||
497 | CONFIG_SCSI_LOWLEVEL=y | ||
498 | # CONFIG_ISCSI_TCP is not set | ||
499 | # CONFIG_SCSI_BNX2_ISCSI is not set | ||
500 | # CONFIG_BE2ISCSI is not set | ||
501 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | ||
502 | # CONFIG_SCSI_HPSA is not set | ||
503 | # CONFIG_SCSI_3W_9XXX is not set | ||
504 | # CONFIG_SCSI_3W_SAS is not set | ||
505 | # CONFIG_SCSI_ACARD is not set | ||
506 | # CONFIG_SCSI_AACRAID is not set | ||
507 | # CONFIG_SCSI_AIC7XXX is not set | ||
508 | # CONFIG_SCSI_AIC7XXX_OLD is not set | ||
509 | # CONFIG_SCSI_AIC79XX is not set | ||
510 | # CONFIG_SCSI_AIC94XX is not set | ||
511 | # CONFIG_SCSI_MVSAS is not set | ||
512 | # CONFIG_SCSI_DPT_I2O is not set | ||
513 | # CONFIG_SCSI_ADVANSYS is not set | ||
514 | # CONFIG_SCSI_ARCMSR is not set | ||
515 | # CONFIG_MEGARAID_NEWGEN is not set | ||
516 | # CONFIG_MEGARAID_LEGACY is not set | ||
517 | # CONFIG_MEGARAID_SAS is not set | ||
518 | # CONFIG_SCSI_MPT2SAS is not set | ||
519 | # CONFIG_SCSI_HPTIOP is not set | ||
520 | # CONFIG_LIBFC is not set | ||
521 | # CONFIG_LIBFCOE is not set | ||
522 | # CONFIG_FCOE is not set | ||
523 | # CONFIG_SCSI_DMX3191D is not set | ||
524 | # CONFIG_SCSI_FUTURE_DOMAIN is not set | ||
525 | # CONFIG_SCSI_IPS is not set | ||
526 | # CONFIG_SCSI_INITIO is not set | ||
527 | # CONFIG_SCSI_INIA100 is not set | ||
528 | # CONFIG_SCSI_STEX is not set | ||
529 | # CONFIG_SCSI_SYM53C8XX_2 is not set | ||
530 | # CONFIG_SCSI_IPR is not set | ||
531 | # CONFIG_SCSI_QLOGIC_1280 is not set | ||
532 | # CONFIG_SCSI_QLA_FC is not set | ||
533 | # CONFIG_SCSI_QLA_ISCSI is not set | ||
534 | # CONFIG_SCSI_LPFC is not set | ||
535 | # CONFIG_SCSI_DC395x is not set | ||
536 | # CONFIG_SCSI_DC390T is not set | ||
537 | # CONFIG_SCSI_NSP32 is not set | ||
538 | # CONFIG_SCSI_DEBUG is not set | ||
539 | # CONFIG_SCSI_PMCRAID is not set | ||
540 | # CONFIG_SCSI_PM8001 is not set | ||
541 | # CONFIG_SCSI_SRP is not set | ||
542 | # CONFIG_SCSI_BFA_FC is not set | ||
543 | # CONFIG_SCSI_LOWLEVEL_PCMCIA is not set | ||
544 | # CONFIG_SCSI_DH is not set | ||
545 | # CONFIG_SCSI_OSD_INITIATOR is not set | ||
546 | CONFIG_ATA=y | ||
547 | # CONFIG_ATA_NONSTANDARD is not set | ||
548 | CONFIG_ATA_VERBOSE_ERROR=y | ||
549 | CONFIG_SATA_PMP=y | ||
550 | # CONFIG_SATA_AHCI is not set | ||
551 | CONFIG_SATA_SIL24=y | ||
552 | CONFIG_ATA_SFF=y | ||
553 | # CONFIG_SATA_SVW is not set | ||
554 | # CONFIG_ATA_PIIX is not set | ||
555 | # CONFIG_SATA_MV is not set | ||
556 | # CONFIG_SATA_NV is not set | ||
557 | # CONFIG_PDC_ADMA is not set | ||
558 | # CONFIG_SATA_QSTOR is not set | ||
559 | # CONFIG_SATA_PROMISE is not set | ||
560 | # CONFIG_SATA_SX4 is not set | ||
561 | # CONFIG_SATA_SIL is not set | ||
562 | # CONFIG_SATA_SIS is not set | ||
563 | # CONFIG_SATA_ULI is not set | ||
564 | # CONFIG_SATA_VIA is not set | ||
565 | # CONFIG_SATA_VITESSE is not set | ||
566 | # CONFIG_SATA_INIC162X is not set | ||
567 | # CONFIG_PATA_ALI is not set | ||
568 | # CONFIG_PATA_AMD is not set | ||
569 | # CONFIG_PATA_ARTOP is not set | ||
570 | # CONFIG_PATA_ATP867X is not set | ||
571 | # CONFIG_PATA_ATIIXP is not set | ||
572 | # CONFIG_PATA_CMD640_PCI is not set | ||
573 | # CONFIG_PATA_CMD64X is not set | ||
574 | # CONFIG_PATA_CS5520 is not set | ||
575 | # CONFIG_PATA_CS5530 is not set | ||
576 | # CONFIG_PATA_CYPRESS is not set | ||
577 | # CONFIG_PATA_EFAR is not set | ||
578 | # CONFIG_ATA_GENERIC is not set | ||
579 | # CONFIG_PATA_HPT366 is not set | ||
580 | # CONFIG_PATA_HPT37X is not set | ||
581 | # CONFIG_PATA_HPT3X2N is not set | ||
582 | # CONFIG_PATA_HPT3X3 is not set | ||
583 | # CONFIG_PATA_IT821X is not set | ||
584 | # CONFIG_PATA_IT8213 is not set | ||
585 | # CONFIG_PATA_JMICRON is not set | ||
586 | # CONFIG_PATA_LEGACY is not set | ||
587 | # CONFIG_PATA_TRIFLEX is not set | ||
588 | # CONFIG_PATA_MARVELL is not set | ||
589 | # CONFIG_PATA_MPIIX is not set | ||
590 | # CONFIG_PATA_OLDPIIX is not set | ||
591 | # CONFIG_PATA_NETCELL is not set | ||
592 | # CONFIG_PATA_NINJA32 is not set | ||
593 | # CONFIG_PATA_NS87410 is not set | ||
594 | # CONFIG_PATA_NS87415 is not set | ||
595 | # CONFIG_PATA_OPTI is not set | ||
596 | # CONFIG_PATA_OPTIDMA is not set | ||
597 | # CONFIG_PATA_PDC2027X is not set | ||
598 | # CONFIG_PATA_PDC_OLD is not set | ||
599 | # CONFIG_PATA_RADISYS is not set | ||
600 | # CONFIG_PATA_RDC is not set | ||
601 | # CONFIG_PATA_RZ1000 is not set | ||
602 | # CONFIG_PATA_SC1200 is not set | ||
603 | # CONFIG_PATA_SERVERWORKS is not set | ||
604 | # CONFIG_PATA_SIL680 is not set | ||
605 | # CONFIG_PATA_SIS is not set | ||
606 | # CONFIG_PATA_TOSHIBA is not set | ||
607 | # CONFIG_PATA_VIA is not set | ||
608 | # CONFIG_PATA_WINBOND is not set | ||
609 | # CONFIG_PATA_PLATFORM is not set | ||
610 | # CONFIG_PATA_SCH is not set | ||
611 | # CONFIG_MD is not set | ||
612 | # CONFIG_FUSION is not set | ||
613 | |||
614 | # | ||
615 | # IEEE 1394 (FireWire) support | ||
616 | # | ||
617 | |||
618 | # | ||
619 | # You can enable one or both FireWire driver stacks. | ||
620 | # | ||
621 | |||
622 | # | ||
623 | # The newer stack is recommended. | ||
624 | # | ||
625 | # CONFIG_FIREWIRE is not set | ||
626 | # CONFIG_IEEE1394 is not set | ||
627 | # CONFIG_I2O is not set | ||
628 | CONFIG_NETDEVICES=y | ||
629 | # CONFIG_DUMMY is not set | ||
630 | # CONFIG_BONDING is not set | ||
631 | # CONFIG_MACVLAN is not set | ||
632 | # CONFIG_EQUALIZER is not set | ||
633 | CONFIG_TUN=y | ||
634 | # CONFIG_VETH is not set | ||
635 | # CONFIG_ARCNET is not set | ||
636 | # CONFIG_NET_ETHERNET is not set | ||
637 | CONFIG_NETDEV_1000=y | ||
638 | # CONFIG_ACENIC is not set | ||
639 | # CONFIG_DL2K is not set | ||
640 | # CONFIG_E1000 is not set | ||
641 | CONFIG_E1000E=y | ||
642 | # CONFIG_IP1000 is not set | ||
643 | # CONFIG_IGB is not set | ||
644 | # CONFIG_IGBVF is not set | ||
645 | # CONFIG_NS83820 is not set | ||
646 | # CONFIG_HAMACHI is not set | ||
647 | # CONFIG_YELLOWFIN is not set | ||
648 | # CONFIG_R8169 is not set | ||
649 | # CONFIG_SIS190 is not set | ||
650 | # CONFIG_SKGE is not set | ||
651 | # CONFIG_SKY2 is not set | ||
652 | # CONFIG_VIA_VELOCITY is not set | ||
653 | # CONFIG_TIGON3 is not set | ||
654 | # CONFIG_BNX2 is not set | ||
655 | # CONFIG_CNIC is not set | ||
656 | # CONFIG_QLA3XXX is not set | ||
657 | # CONFIG_ATL1 is not set | ||
658 | # CONFIG_ATL1E is not set | ||
659 | # CONFIG_ATL1C is not set | ||
660 | # CONFIG_JME is not set | ||
661 | # CONFIG_NETDEV_10000 is not set | ||
662 | # CONFIG_TR is not set | ||
663 | # CONFIG_WLAN is not set | ||
664 | |||
665 | # | ||
666 | # Enable WiMAX (Networking options) to see the WiMAX drivers | ||
667 | # | ||
668 | # CONFIG_WAN is not set | ||
669 | # CONFIG_FDDI is not set | ||
670 | # CONFIG_HIPPI is not set | ||
671 | # CONFIG_PPP is not set | ||
672 | # CONFIG_SLIP is not set | ||
673 | # CONFIG_NET_FC is not set | ||
674 | # CONFIG_NETCONSOLE is not set | ||
675 | # CONFIG_NETPOLL is not set | ||
676 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
677 | # CONFIG_VMXNET3 is not set | ||
678 | # CONFIG_ISDN is not set | ||
679 | # CONFIG_PHONE is not set | ||
680 | |||
681 | # | ||
682 | # Input device support | ||
683 | # | ||
684 | CONFIG_INPUT=y | ||
685 | # CONFIG_INPUT_FF_MEMLESS is not set | ||
686 | # CONFIG_INPUT_POLLDEV is not set | ||
687 | # CONFIG_INPUT_SPARSEKMAP is not set | ||
688 | |||
689 | # | ||
690 | # Userland interfaces | ||
691 | # | ||
692 | # CONFIG_INPUT_MOUSEDEV is not set | ||
693 | # CONFIG_INPUT_JOYDEV is not set | ||
694 | # CONFIG_INPUT_EVDEV is not set | ||
695 | # CONFIG_INPUT_EVBUG is not set | ||
696 | |||
697 | # | ||
698 | # Input Device Drivers | ||
699 | # | ||
700 | # CONFIG_INPUT_KEYBOARD is not set | ||
701 | # CONFIG_INPUT_MOUSE is not set | ||
702 | # CONFIG_INPUT_JOYSTICK is not set | ||
703 | # CONFIG_INPUT_TABLET is not set | ||
704 | # CONFIG_INPUT_TOUCHSCREEN is not set | ||
705 | # CONFIG_INPUT_MISC is not set | ||
706 | |||
707 | # | ||
708 | # Hardware I/O ports | ||
709 | # | ||
710 | # CONFIG_SERIO is not set | ||
711 | # CONFIG_GAMEPORT is not set | ||
712 | |||
713 | # | ||
714 | # Character devices | ||
715 | # | ||
716 | # CONFIG_VT is not set | ||
717 | CONFIG_DEVKMEM=y | ||
718 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
719 | # CONFIG_NOZOMI is not set | ||
720 | |||
721 | # | ||
722 | # Serial drivers | ||
723 | # | ||
724 | # CONFIG_SERIAL_8250 is not set | ||
725 | |||
726 | # | ||
727 | # Non-8250 serial port support | ||
728 | # | ||
729 | # CONFIG_SERIAL_JSM is not set | ||
730 | # CONFIG_SERIAL_TIMBERDALE is not set | ||
731 | CONFIG_UNIX98_PTYS=y | ||
732 | # CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set | ||
733 | # CONFIG_LEGACY_PTYS is not set | ||
734 | CONFIG_HVC_DRIVER=y | ||
735 | # CONFIG_IPMI_HANDLER is not set | ||
736 | # CONFIG_HW_RANDOM is not set | ||
737 | # CONFIG_R3964 is not set | ||
738 | # CONFIG_APPLICOM is not set | ||
739 | |||
740 | # | ||
741 | # PCMCIA character devices | ||
742 | # | ||
743 | # CONFIG_RAW_DRIVER is not set | ||
744 | # CONFIG_TCG_TPM is not set | ||
745 | CONFIG_I2C=y | ||
746 | CONFIG_I2C_BOARDINFO=y | ||
747 | CONFIG_I2C_COMPAT=y | ||
748 | CONFIG_I2C_CHARDEV=y | ||
749 | CONFIG_I2C_HELPER_AUTO=y | ||
750 | |||
751 | # | ||
752 | # I2C Hardware Bus support | ||
753 | # | ||
754 | |||
755 | # | ||
756 | # PC SMBus host controller drivers | ||
757 | # | ||
758 | # CONFIG_I2C_ALI1535 is not set | ||
759 | # CONFIG_I2C_ALI1563 is not set | ||
760 | # CONFIG_I2C_ALI15X3 is not set | ||
761 | # CONFIG_I2C_AMD756 is not set | ||
762 | # CONFIG_I2C_AMD8111 is not set | ||
763 | # CONFIG_I2C_I801 is not set | ||
764 | # CONFIG_I2C_ISCH is not set | ||
765 | # CONFIG_I2C_PIIX4 is not set | ||
766 | # CONFIG_I2C_NFORCE2 is not set | ||
767 | # CONFIG_I2C_SIS5595 is not set | ||
768 | # CONFIG_I2C_SIS630 is not set | ||
769 | # CONFIG_I2C_SIS96X is not set | ||
770 | # CONFIG_I2C_VIA is not set | ||
771 | # CONFIG_I2C_VIAPRO is not set | ||
772 | |||
773 | # | ||
774 | # I2C system bus drivers (mostly embedded / system-on-chip) | ||
775 | # | ||
776 | # CONFIG_I2C_OCORES is not set | ||
777 | # CONFIG_I2C_SIMTEC is not set | ||
778 | # CONFIG_I2C_XILINX is not set | ||
779 | |||
780 | # | ||
781 | # External I2C/SMBus adapter drivers | ||
782 | # | ||
783 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
784 | # CONFIG_I2C_TAOS_EVM is not set | ||
785 | |||
786 | # | ||
787 | # Other I2C/SMBus bus drivers | ||
788 | # | ||
789 | # CONFIG_I2C_PCA_PLATFORM is not set | ||
790 | # CONFIG_I2C_STUB is not set | ||
791 | # CONFIG_I2C_DEBUG_CORE is not set | ||
792 | # CONFIG_I2C_DEBUG_ALGO is not set | ||
793 | # CONFIG_I2C_DEBUG_BUS is not set | ||
794 | # CONFIG_SPI is not set | ||
795 | |||
796 | # | ||
797 | # PPS support | ||
798 | # | ||
799 | # CONFIG_PPS is not set | ||
800 | # CONFIG_W1 is not set | ||
801 | # CONFIG_POWER_SUPPLY is not set | ||
802 | # CONFIG_HWMON is not set | ||
803 | # CONFIG_THERMAL is not set | ||
804 | CONFIG_WATCHDOG=y | ||
805 | CONFIG_WATCHDOG_NOWAYOUT=y | ||
806 | |||
807 | # | ||
808 | # Watchdog Device Drivers | ||
809 | # | ||
810 | # CONFIG_SOFT_WATCHDOG is not set | ||
811 | # CONFIG_ALIM7101_WDT is not set | ||
812 | |||
813 | # | ||
814 | # PCI-based Watchdog Cards | ||
815 | # | ||
816 | # CONFIG_PCIPCWATCHDOG is not set | ||
817 | # CONFIG_WDTPCI is not set | ||
818 | CONFIG_SSB_POSSIBLE=y | ||
819 | |||
820 | # | ||
821 | # Sonics Silicon Backplane | ||
822 | # | ||
823 | # CONFIG_SSB is not set | ||
824 | |||
825 | # | ||
826 | # Multifunction device drivers | ||
827 | # | ||
828 | # CONFIG_MFD_CORE is not set | ||
829 | # CONFIG_MFD_88PM860X is not set | ||
830 | # CONFIG_MFD_SM501 is not set | ||
831 | # CONFIG_HTC_PASIC3 is not set | ||
832 | # CONFIG_TWL4030_CORE is not set | ||
833 | # CONFIG_MFD_TMIO is not set | ||
834 | # CONFIG_PMIC_DA903X is not set | ||
835 | # CONFIG_PMIC_ADP5520 is not set | ||
836 | # CONFIG_MFD_MAX8925 is not set | ||
837 | # CONFIG_MFD_WM8400 is not set | ||
838 | # CONFIG_MFD_WM831X is not set | ||
839 | # CONFIG_MFD_WM8350_I2C is not set | ||
840 | # CONFIG_MFD_WM8994 is not set | ||
841 | # CONFIG_MFD_PCF50633 is not set | ||
842 | # CONFIG_AB3100_CORE is not set | ||
843 | # CONFIG_LPC_SCH is not set | ||
844 | # CONFIG_REGULATOR is not set | ||
845 | # CONFIG_MEDIA_SUPPORT is not set | ||
846 | |||
847 | # | ||
848 | # Graphics support | ||
849 | # | ||
850 | # CONFIG_VGA_ARB is not set | ||
851 | # CONFIG_DRM is not set | ||
852 | # CONFIG_VGASTATE is not set | ||
853 | # CONFIG_VIDEO_OUTPUT_CONTROL is not set | ||
854 | # CONFIG_FB is not set | ||
855 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
856 | |||
857 | # | ||
858 | # Display device support | ||
859 | # | ||
860 | # CONFIG_DISPLAY_SUPPORT is not set | ||
861 | # CONFIG_SOUND is not set | ||
862 | # CONFIG_HID_SUPPORT is not set | ||
863 | # CONFIG_USB_SUPPORT is not set | ||
864 | # CONFIG_UWB is not set | ||
865 | # CONFIG_MMC is not set | ||
866 | # CONFIG_MEMSTICK is not set | ||
867 | # CONFIG_NEW_LEDS is not set | ||
868 | # CONFIG_ACCESSIBILITY is not set | ||
869 | # CONFIG_INFINIBAND is not set | ||
870 | CONFIG_RTC_LIB=y | ||
871 | CONFIG_RTC_CLASS=y | ||
872 | CONFIG_RTC_HCTOSYS=y | ||
873 | CONFIG_RTC_HCTOSYS_DEVICE="rtc0" | ||
874 | # CONFIG_RTC_DEBUG is not set | ||
875 | |||
876 | # | ||
877 | # RTC interfaces | ||
878 | # | ||
879 | # CONFIG_RTC_INTF_SYSFS is not set | ||
880 | # CONFIG_RTC_INTF_PROC is not set | ||
881 | CONFIG_RTC_INTF_DEV=y | ||
882 | # CONFIG_RTC_INTF_DEV_UIE_EMUL is not set | ||
883 | # CONFIG_RTC_DRV_TEST is not set | ||
884 | |||
885 | # | ||
886 | # I2C RTC drivers | ||
887 | # | ||
888 | # CONFIG_RTC_DRV_DS1307 is not set | ||
889 | # CONFIG_RTC_DRV_DS1374 is not set | ||
890 | # CONFIG_RTC_DRV_DS1672 is not set | ||
891 | # CONFIG_RTC_DRV_MAX6900 is not set | ||
892 | # CONFIG_RTC_DRV_RS5C372 is not set | ||
893 | # CONFIG_RTC_DRV_ISL1208 is not set | ||
894 | # CONFIG_RTC_DRV_X1205 is not set | ||
895 | # CONFIG_RTC_DRV_PCF8563 is not set | ||
896 | # CONFIG_RTC_DRV_PCF8583 is not set | ||
897 | # CONFIG_RTC_DRV_M41T80 is not set | ||
898 | # CONFIG_RTC_DRV_BQ32K is not set | ||
899 | # CONFIG_RTC_DRV_S35390A is not set | ||
900 | # CONFIG_RTC_DRV_FM3130 is not set | ||
901 | # CONFIG_RTC_DRV_RX8581 is not set | ||
902 | # CONFIG_RTC_DRV_RX8025 is not set | ||
903 | |||
904 | # | ||
905 | # SPI RTC drivers | ||
906 | # | ||
907 | |||
908 | # | ||
909 | # Platform RTC drivers | ||
910 | # | ||
911 | # CONFIG_RTC_DRV_DS1286 is not set | ||
912 | # CONFIG_RTC_DRV_DS1511 is not set | ||
913 | # CONFIG_RTC_DRV_DS1553 is not set | ||
914 | # CONFIG_RTC_DRV_DS1742 is not set | ||
915 | # CONFIG_RTC_DRV_STK17TA8 is not set | ||
916 | # CONFIG_RTC_DRV_M48T86 is not set | ||
917 | # CONFIG_RTC_DRV_M48T35 is not set | ||
918 | # CONFIG_RTC_DRV_M48T59 is not set | ||
919 | # CONFIG_RTC_DRV_MSM6242 is not set | ||
920 | # CONFIG_RTC_DRV_BQ4802 is not set | ||
921 | # CONFIG_RTC_DRV_RP5C01 is not set | ||
922 | # CONFIG_RTC_DRV_V3020 is not set | ||
923 | |||
924 | # | ||
925 | # on-CPU RTC drivers | ||
926 | # | ||
927 | # CONFIG_DMADEVICES is not set | ||
928 | # CONFIG_AUXDISPLAY is not set | ||
929 | # CONFIG_UIO is not set | ||
930 | |||
931 | # | ||
932 | # TI VLYNQ | ||
933 | # | ||
934 | # CONFIG_STAGING is not set | ||
935 | |||
936 | # | ||
937 | # File systems | ||
938 | # | ||
939 | CONFIG_EXT2_FS=y | ||
940 | # CONFIG_EXT2_FS_XATTR is not set | ||
941 | # CONFIG_EXT2_FS_XIP is not set | ||
942 | CONFIG_EXT3_FS=y | ||
943 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
944 | CONFIG_EXT3_FS_XATTR=y | ||
945 | # CONFIG_EXT3_FS_POSIX_ACL is not set | ||
946 | # CONFIG_EXT3_FS_SECURITY is not set | ||
947 | # CONFIG_EXT4_FS is not set | ||
948 | CONFIG_JBD=y | ||
949 | CONFIG_FS_MBCACHE=y | ||
950 | # CONFIG_REISERFS_FS is not set | ||
951 | # CONFIG_JFS_FS is not set | ||
952 | # CONFIG_FS_POSIX_ACL is not set | ||
953 | # CONFIG_XFS_FS is not set | ||
954 | # CONFIG_GFS2_FS is not set | ||
955 | # CONFIG_OCFS2_FS is not set | ||
956 | # CONFIG_BTRFS_FS is not set | ||
957 | # CONFIG_NILFS2_FS is not set | ||
958 | CONFIG_FILE_LOCKING=y | ||
959 | CONFIG_FSNOTIFY=y | ||
960 | CONFIG_DNOTIFY=y | ||
961 | # CONFIG_INOTIFY is not set | ||
962 | CONFIG_INOTIFY_USER=y | ||
963 | # CONFIG_QUOTA is not set | ||
964 | # CONFIG_AUTOFS_FS is not set | ||
965 | # CONFIG_AUTOFS4_FS is not set | ||
966 | CONFIG_FUSE_FS=y | ||
967 | # CONFIG_CUSE is not set | ||
968 | |||
969 | # | ||
970 | # Caches | ||
971 | # | ||
972 | # CONFIG_FSCACHE is not set | ||
973 | |||
974 | # | ||
975 | # CD-ROM/DVD Filesystems | ||
976 | # | ||
977 | # CONFIG_ISO9660_FS is not set | ||
978 | # CONFIG_UDF_FS is not set | ||
979 | |||
980 | # | ||
981 | # DOS/FAT/NT Filesystems | ||
982 | # | ||
983 | CONFIG_FAT_FS=y | ||
984 | CONFIG_MSDOS_FS=y | ||
985 | CONFIG_VFAT_FS=m | ||
986 | CONFIG_FAT_DEFAULT_CODEPAGE=437 | ||
987 | CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | ||
988 | # CONFIG_NTFS_FS is not set | ||
989 | |||
990 | # | ||
991 | # Pseudo filesystems | ||
992 | # | ||
993 | CONFIG_PROC_FS=y | ||
994 | # CONFIG_PROC_KCORE is not set | ||
995 | CONFIG_PROC_SYSCTL=y | ||
996 | CONFIG_PROC_PAGE_MONITOR=y | ||
997 | CONFIG_SYSFS=y | ||
998 | CONFIG_TMPFS=y | ||
999 | # CONFIG_TMPFS_POSIX_ACL is not set | ||
1000 | CONFIG_HUGETLBFS=y | ||
1001 | CONFIG_HUGETLB_PAGE=y | ||
1002 | # CONFIG_CONFIGFS_FS is not set | ||
1003 | CONFIG_MISC_FILESYSTEMS=y | ||
1004 | # CONFIG_ADFS_FS is not set | ||
1005 | # CONFIG_AFFS_FS is not set | ||
1006 | # CONFIG_HFS_FS is not set | ||
1007 | # CONFIG_HFSPLUS_FS is not set | ||
1008 | # CONFIG_BEFS_FS is not set | ||
1009 | # CONFIG_BFS_FS is not set | ||
1010 | # CONFIG_EFS_FS is not set | ||
1011 | # CONFIG_LOGFS is not set | ||
1012 | # CONFIG_CRAMFS is not set | ||
1013 | # CONFIG_SQUASHFS is not set | ||
1014 | # CONFIG_VXFS_FS is not set | ||
1015 | # CONFIG_MINIX_FS is not set | ||
1016 | # CONFIG_OMFS_FS is not set | ||
1017 | # CONFIG_HPFS_FS is not set | ||
1018 | # CONFIG_QNX4FS_FS is not set | ||
1019 | # CONFIG_ROMFS_FS is not set | ||
1020 | # CONFIG_SYSV_FS is not set | ||
1021 | # CONFIG_UFS_FS is not set | ||
1022 | CONFIG_NETWORK_FILESYSTEMS=y | ||
1023 | CONFIG_NFS_FS=m | ||
1024 | CONFIG_NFS_V3=y | ||
1025 | # CONFIG_NFS_V3_ACL is not set | ||
1026 | # CONFIG_NFS_V4 is not set | ||
1027 | # CONFIG_NFSD is not set | ||
1028 | CONFIG_LOCKD=m | ||
1029 | CONFIG_LOCKD_V4=y | ||
1030 | CONFIG_NFS_COMMON=y | ||
1031 | CONFIG_SUNRPC=m | ||
1032 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
1033 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
1034 | # CONFIG_SMB_FS is not set | ||
1035 | # CONFIG_CEPH_FS is not set | ||
1036 | # CONFIG_CIFS is not set | ||
1037 | # CONFIG_NCP_FS is not set | ||
1038 | # CONFIG_CODA_FS is not set | ||
1039 | # CONFIG_AFS_FS is not set | ||
1040 | |||
1041 | # | ||
1042 | # Partition Types | ||
1043 | # | ||
1044 | # CONFIG_PARTITION_ADVANCED is not set | ||
1045 | CONFIG_MSDOS_PARTITION=y | ||
1046 | CONFIG_NLS=y | ||
1047 | CONFIG_NLS_DEFAULT="iso8859-1" | ||
1048 | CONFIG_NLS_CODEPAGE_437=y | ||
1049 | # CONFIG_NLS_CODEPAGE_737 is not set | ||
1050 | # CONFIG_NLS_CODEPAGE_775 is not set | ||
1051 | # CONFIG_NLS_CODEPAGE_850 is not set | ||
1052 | # CONFIG_NLS_CODEPAGE_852 is not set | ||
1053 | # CONFIG_NLS_CODEPAGE_855 is not set | ||
1054 | # CONFIG_NLS_CODEPAGE_857 is not set | ||
1055 | # CONFIG_NLS_CODEPAGE_860 is not set | ||
1056 | # CONFIG_NLS_CODEPAGE_861 is not set | ||
1057 | # CONFIG_NLS_CODEPAGE_862 is not set | ||
1058 | # CONFIG_NLS_CODEPAGE_863 is not set | ||
1059 | # CONFIG_NLS_CODEPAGE_864 is not set | ||
1060 | # CONFIG_NLS_CODEPAGE_865 is not set | ||
1061 | # CONFIG_NLS_CODEPAGE_866 is not set | ||
1062 | # CONFIG_NLS_CODEPAGE_869 is not set | ||
1063 | # CONFIG_NLS_CODEPAGE_936 is not set | ||
1064 | # CONFIG_NLS_CODEPAGE_950 is not set | ||
1065 | # CONFIG_NLS_CODEPAGE_932 is not set | ||
1066 | # CONFIG_NLS_CODEPAGE_949 is not set | ||
1067 | # CONFIG_NLS_CODEPAGE_874 is not set | ||
1068 | # CONFIG_NLS_ISO8859_8 is not set | ||
1069 | # CONFIG_NLS_CODEPAGE_1250 is not set | ||
1070 | # CONFIG_NLS_CODEPAGE_1251 is not set | ||
1071 | # CONFIG_NLS_ASCII is not set | ||
1072 | CONFIG_NLS_ISO8859_1=y | ||
1073 | # CONFIG_NLS_ISO8859_2 is not set | ||
1074 | # CONFIG_NLS_ISO8859_3 is not set | ||
1075 | # CONFIG_NLS_ISO8859_4 is not set | ||
1076 | # CONFIG_NLS_ISO8859_5 is not set | ||
1077 | # CONFIG_NLS_ISO8859_6 is not set | ||
1078 | # CONFIG_NLS_ISO8859_7 is not set | ||
1079 | # CONFIG_NLS_ISO8859_9 is not set | ||
1080 | # CONFIG_NLS_ISO8859_13 is not set | ||
1081 | # CONFIG_NLS_ISO8859_14 is not set | ||
1082 | # CONFIG_NLS_ISO8859_15 is not set | ||
1083 | # CONFIG_NLS_KOI8_R is not set | ||
1084 | # CONFIG_NLS_KOI8_U is not set | ||
1085 | # CONFIG_NLS_UTF8 is not set | ||
1086 | # CONFIG_DLM is not set | ||
1087 | |||
1088 | # | ||
1089 | # Kernel hacking | ||
1090 | # | ||
1091 | # CONFIG_PRINTK_TIME is not set | ||
1092 | CONFIG_ENABLE_WARN_DEPRECATED=y | ||
1093 | CONFIG_ENABLE_MUST_CHECK=y | ||
1094 | CONFIG_FRAME_WARN=2048 | ||
1095 | CONFIG_MAGIC_SYSRQ=y | ||
1096 | # CONFIG_STRIP_ASM_SYMS is not set | ||
1097 | # CONFIG_UNUSED_SYMBOLS is not set | ||
1098 | # CONFIG_DEBUG_FS is not set | ||
1099 | # CONFIG_HEADERS_CHECK is not set | ||
1100 | CONFIG_DEBUG_KERNEL=y | ||
1101 | # CONFIG_DEBUG_SHIRQ is not set | ||
1102 | CONFIG_DETECT_SOFTLOCKUP=y | ||
1103 | # CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set | ||
1104 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 | ||
1105 | CONFIG_DETECT_HUNG_TASK=y | ||
1106 | # CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set | ||
1107 | CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 | ||
1108 | CONFIG_SCHED_DEBUG=y | ||
1109 | # CONFIG_SCHEDSTATS is not set | ||
1110 | # CONFIG_TIMER_STATS is not set | ||
1111 | # CONFIG_DEBUG_OBJECTS is not set | ||
1112 | # CONFIG_SLUB_DEBUG_ON is not set | ||
1113 | # CONFIG_SLUB_STATS is not set | ||
1114 | # CONFIG_DEBUG_RT_MUTEXES is not set | ||
1115 | # CONFIG_RT_MUTEX_TESTER is not set | ||
1116 | # CONFIG_DEBUG_SPINLOCK is not set | ||
1117 | # CONFIG_DEBUG_MUTEXES is not set | ||
1118 | # CONFIG_DEBUG_LOCK_ALLOC is not set | ||
1119 | # CONFIG_PROVE_LOCKING is not set | ||
1120 | # CONFIG_LOCK_STAT is not set | ||
1121 | CONFIG_DEBUG_SPINLOCK_SLEEP=y | ||
1122 | # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set | ||
1123 | CONFIG_STACKTRACE=y | ||
1124 | # CONFIG_DEBUG_KOBJECT is not set | ||
1125 | # CONFIG_DEBUG_HIGHMEM is not set | ||
1126 | CONFIG_DEBUG_INFO=y | ||
1127 | CONFIG_DEBUG_VM=y | ||
1128 | # CONFIG_DEBUG_WRITECOUNT is not set | ||
1129 | # CONFIG_DEBUG_MEMORY_INIT is not set | ||
1130 | # CONFIG_DEBUG_LIST is not set | ||
1131 | # CONFIG_DEBUG_SG is not set | ||
1132 | # CONFIG_DEBUG_NOTIFIERS is not set | ||
1133 | # CONFIG_DEBUG_CREDENTIALS is not set | ||
1134 | # CONFIG_RCU_TORTURE_TEST is not set | ||
1135 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
1136 | # CONFIG_BACKTRACE_SELF_TEST is not set | ||
1137 | # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set | ||
1138 | # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set | ||
1139 | # CONFIG_FAULT_INJECTION is not set | ||
1140 | # CONFIG_SYSCTL_SYSCALL_CHECK is not set | ||
1141 | # CONFIG_PAGE_POISONING is not set | ||
1142 | CONFIG_RING_BUFFER=y | ||
1143 | CONFIG_RING_BUFFER_ALLOW_SWAP=y | ||
1144 | CONFIG_TRACING_SUPPORT=y | ||
1145 | CONFIG_FTRACE=y | ||
1146 | # CONFIG_IRQSOFF_TRACER is not set | ||
1147 | # CONFIG_SCHED_TRACER is not set | ||
1148 | # CONFIG_ENABLE_DEFAULT_TRACERS is not set | ||
1149 | # CONFIG_BOOT_TRACER is not set | ||
1150 | CONFIG_BRANCH_PROFILE_NONE=y | ||
1151 | # CONFIG_PROFILE_ANNOTATED_BRANCHES is not set | ||
1152 | # CONFIG_PROFILE_ALL_BRANCHES is not set | ||
1153 | # CONFIG_KMEMTRACE is not set | ||
1154 | # CONFIG_WORKQUEUE_TRACER is not set | ||
1155 | # CONFIG_BLK_DEV_IO_TRACE is not set | ||
1156 | # CONFIG_RING_BUFFER_BENCHMARK is not set | ||
1157 | # CONFIG_SAMPLES is not set | ||
1158 | CONFIG_EARLY_PRINTK=y | ||
1159 | CONFIG_DEBUG_STACKOVERFLOW=y | ||
1160 | # CONFIG_DEBUG_STACK_USAGE is not set | ||
1161 | CONFIG_DEBUG_EXTRA_FLAGS="-femit-struct-debug-baseonly" | ||
1162 | |||
1163 | # | ||
1164 | # Security options | ||
1165 | # | ||
1166 | # CONFIG_KEYS is not set | ||
1167 | # CONFIG_SECURITY is not set | ||
1168 | # CONFIG_SECURITYFS is not set | ||
1169 | # CONFIG_DEFAULT_SECURITY_SELINUX is not set | ||
1170 | # CONFIG_DEFAULT_SECURITY_SMACK is not set | ||
1171 | # CONFIG_DEFAULT_SECURITY_TOMOYO is not set | ||
1172 | CONFIG_DEFAULT_SECURITY_DAC=y | ||
1173 | CONFIG_DEFAULT_SECURITY="" | ||
1174 | CONFIG_CRYPTO=y | ||
1175 | |||
1176 | # | ||
1177 | # Crypto core or helper | ||
1178 | # | ||
1179 | # CONFIG_CRYPTO_FIPS is not set | ||
1180 | CONFIG_CRYPTO_ALGAPI=m | ||
1181 | CONFIG_CRYPTO_ALGAPI2=m | ||
1182 | CONFIG_CRYPTO_RNG=m | ||
1183 | CONFIG_CRYPTO_RNG2=m | ||
1184 | # CONFIG_CRYPTO_MANAGER is not set | ||
1185 | # CONFIG_CRYPTO_MANAGER2 is not set | ||
1186 | # CONFIG_CRYPTO_GF128MUL is not set | ||
1187 | # CONFIG_CRYPTO_NULL is not set | ||
1188 | # CONFIG_CRYPTO_PCRYPT is not set | ||
1189 | # CONFIG_CRYPTO_CRYPTD is not set | ||
1190 | # CONFIG_CRYPTO_AUTHENC is not set | ||
1191 | # CONFIG_CRYPTO_TEST is not set | ||
1192 | |||
1193 | # | ||
1194 | # Authenticated Encryption with Associated Data | ||
1195 | # | ||
1196 | # CONFIG_CRYPTO_CCM is not set | ||
1197 | # CONFIG_CRYPTO_GCM is not set | ||
1198 | # CONFIG_CRYPTO_SEQIV is not set | ||
1199 | |||
1200 | # | ||
1201 | # Block modes | ||
1202 | # | ||
1203 | # CONFIG_CRYPTO_CBC is not set | ||
1204 | # CONFIG_CRYPTO_CTR is not set | ||
1205 | # CONFIG_CRYPTO_CTS is not set | ||
1206 | # CONFIG_CRYPTO_ECB is not set | ||
1207 | # CONFIG_CRYPTO_LRW is not set | ||
1208 | # CONFIG_CRYPTO_PCBC is not set | ||
1209 | # CONFIG_CRYPTO_XTS is not set | ||
1210 | |||
1211 | # | ||
1212 | # Hash modes | ||
1213 | # | ||
1214 | # CONFIG_CRYPTO_HMAC is not set | ||
1215 | # CONFIG_CRYPTO_XCBC is not set | ||
1216 | # CONFIG_CRYPTO_VMAC is not set | ||
1217 | |||
1218 | # | ||
1219 | # Digest | ||
1220 | # | ||
1221 | # CONFIG_CRYPTO_CRC32C is not set | ||
1222 | # CONFIG_CRYPTO_GHASH is not set | ||
1223 | # CONFIG_CRYPTO_MD4 is not set | ||
1224 | # CONFIG_CRYPTO_MD5 is not set | ||
1225 | # CONFIG_CRYPTO_MICHAEL_MIC is not set | ||
1226 | # CONFIG_CRYPTO_RMD128 is not set | ||
1227 | # CONFIG_CRYPTO_RMD160 is not set | ||
1228 | # CONFIG_CRYPTO_RMD256 is not set | ||
1229 | # CONFIG_CRYPTO_RMD320 is not set | ||
1230 | # CONFIG_CRYPTO_SHA1 is not set | ||
1231 | # CONFIG_CRYPTO_SHA256 is not set | ||
1232 | # CONFIG_CRYPTO_SHA512 is not set | ||
1233 | # CONFIG_CRYPTO_TGR192 is not set | ||
1234 | # CONFIG_CRYPTO_WP512 is not set | ||
1235 | |||
1236 | # | ||
1237 | # Ciphers | ||
1238 | # | ||
1239 | CONFIG_CRYPTO_AES=m | ||
1240 | # CONFIG_CRYPTO_ANUBIS is not set | ||
1241 | # CONFIG_CRYPTO_ARC4 is not set | ||
1242 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
1243 | # CONFIG_CRYPTO_CAMELLIA is not set | ||
1244 | # CONFIG_CRYPTO_CAST5 is not set | ||
1245 | # CONFIG_CRYPTO_CAST6 is not set | ||
1246 | # CONFIG_CRYPTO_DES is not set | ||
1247 | # CONFIG_CRYPTO_FCRYPT is not set | ||
1248 | # CONFIG_CRYPTO_KHAZAD is not set | ||
1249 | # CONFIG_CRYPTO_SALSA20 is not set | ||
1250 | # CONFIG_CRYPTO_SEED is not set | ||
1251 | # CONFIG_CRYPTO_SERPENT is not set | ||
1252 | # CONFIG_CRYPTO_TEA is not set | ||
1253 | # CONFIG_CRYPTO_TWOFISH is not set | ||
1254 | |||
1255 | # | ||
1256 | # Compression | ||
1257 | # | ||
1258 | # CONFIG_CRYPTO_DEFLATE is not set | ||
1259 | # CONFIG_CRYPTO_ZLIB is not set | ||
1260 | # CONFIG_CRYPTO_LZO is not set | ||
1261 | |||
1262 | # | ||
1263 | # Random Number Generation | ||
1264 | # | ||
1265 | CONFIG_CRYPTO_ANSI_CPRNG=m | ||
1266 | CONFIG_CRYPTO_HW=y | ||
1267 | # CONFIG_CRYPTO_DEV_HIFN_795X is not set | ||
1268 | # CONFIG_BINARY_PRINTF is not set | ||
1269 | |||
1270 | # | ||
1271 | # Library routines | ||
1272 | # | ||
1273 | CONFIG_BITREVERSE=y | ||
1274 | CONFIG_GENERIC_FIND_FIRST_BIT=y | ||
1275 | CONFIG_GENERIC_FIND_NEXT_BIT=y | ||
1276 | CONFIG_GENERIC_FIND_LAST_BIT=y | ||
1277 | # CONFIG_CRC_CCITT is not set | ||
1278 | # CONFIG_CRC16 is not set | ||
1279 | # CONFIG_CRC_T10DIF is not set | ||
1280 | # CONFIG_CRC_ITU_T is not set | ||
1281 | CONFIG_CRC32=y | ||
1282 | # CONFIG_CRC7 is not set | ||
1283 | # CONFIG_LIBCRC32C is not set | ||
1284 | CONFIG_ZLIB_INFLATE=y | ||
1285 | CONFIG_DECOMPRESS_GZIP=y | ||
1286 | CONFIG_HAS_IOMEM=y | ||
1287 | CONFIG_HAS_IOPORT=y | ||
1288 | CONFIG_HAS_DMA=y | ||
1289 | CONFIG_NLATTR=y | ||
diff --git a/arch/tile/include/arch/abi.h b/arch/tile/include/arch/abi.h new file mode 100644 index 000000000000..7cdc47b3e02a --- /dev/null +++ b/arch/tile/include/arch/abi.h | |||
@@ -0,0 +1,93 @@ | |||
1 | // Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
2 | // | ||
3 | // This program is free software; you can redistribute it and/or | ||
4 | // modify it under the terms of the GNU General Public License | ||
5 | // as published by the Free Software Foundation, version 2. | ||
6 | // | ||
7 | // This program is distributed in the hope that it will be useful, but | ||
8 | // WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | // MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
10 | // NON INFRINGEMENT. See the GNU General Public License for | ||
11 | // more details. | ||
12 | |||
13 | //! @file | ||
14 | //! | ||
15 | //! ABI-related register definitions helpful when writing assembly code. | ||
16 | //! | ||
17 | |||
18 | #ifndef __ARCH_ABI_H__ | ||
19 | #define __ARCH_ABI_H__ | ||
20 | |||
21 | #include <arch/chip.h> | ||
22 | |||
23 | // Registers 0 - 55 are "normal", but some perform special roles. | ||
24 | |||
25 | #define TREG_FP 52 /**< Frame pointer. */ | ||
26 | #define TREG_TP 53 /**< Thread pointer. */ | ||
27 | #define TREG_SP 54 /**< Stack pointer. */ | ||
28 | #define TREG_LR 55 /**< Link to calling function PC. */ | ||
29 | |||
30 | /** Index of last normal general-purpose register. */ | ||
31 | #define TREG_LAST_GPR 55 | ||
32 | |||
33 | // Registers 56 - 62 are "special" network registers. | ||
34 | |||
35 | #define TREG_SN 56 /**< Static network access. */ | ||
36 | #define TREG_IDN0 57 /**< IDN demux 0 access. */ | ||
37 | #define TREG_IDN1 58 /**< IDN demux 1 access. */ | ||
38 | #define TREG_UDN0 59 /**< UDN demux 0 access. */ | ||
39 | #define TREG_UDN1 60 /**< UDN demux 1 access. */ | ||
40 | #define TREG_UDN2 61 /**< UDN demux 2 access. */ | ||
41 | #define TREG_UDN3 62 /**< UDN demux 3 access. */ | ||
42 | |||
43 | // Register 63 is the "special" zero register. | ||
44 | |||
45 | #define TREG_ZERO 63 /**< "Zero" register; always reads as "0". */ | ||
46 | |||
47 | |||
48 | /** By convention, this register is used to hold the syscall number. */ | ||
49 | #define TREG_SYSCALL_NR 10 | ||
50 | |||
51 | /** Name of register that holds the syscall number, for use in assembly. */ | ||
52 | #define TREG_SYSCALL_NR_NAME r10 | ||
53 | |||
54 | |||
55 | //! The ABI requires callers to allocate a caller state save area of | ||
56 | //! this many bytes at the bottom of each stack frame. | ||
57 | //! | ||
58 | #ifdef __tile__ | ||
59 | #define C_ABI_SAVE_AREA_SIZE (2 * __SIZEOF_POINTER__) | ||
60 | #endif | ||
61 | |||
62 | //! The operand to an 'info' opcode directing the backtracer to not | ||
63 | //! try to find the calling frame. | ||
64 | //! | ||
65 | #define INFO_OP_CANNOT_BACKTRACE 2 | ||
66 | |||
67 | #ifndef __ASSEMBLER__ | ||
68 | #if CHIP_WORD_SIZE() > 32 | ||
69 | |||
70 | //! Unsigned type that can hold a register. | ||
71 | typedef unsigned long long uint_reg_t; | ||
72 | |||
73 | //! Signed type that can hold a register. | ||
74 | typedef long long int_reg_t; | ||
75 | |||
76 | //! String prefix to use for printf(). | ||
77 | #define INT_REG_FMT "ll" | ||
78 | |||
79 | #elif !defined(__LP64__) /* avoid confusion with LP64 cross-build tools */ | ||
80 | |||
81 | //! Unsigned type that can hold a register. | ||
82 | typedef unsigned long uint_reg_t; | ||
83 | |||
84 | //! Signed type that can hold a register. | ||
85 | typedef long int_reg_t; | ||
86 | |||
87 | //! String prefix to use for printf(). | ||
88 | #define INT_REG_FMT "l" | ||
89 | |||
90 | #endif | ||
91 | #endif /* __ASSEMBLER__ */ | ||
92 | |||
93 | #endif // !__ARCH_ABI_H__ | ||
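
As a usage sketch (editorial, not part of the patch): the uint_reg_t/INT_REG_FMT pair defined above lets one printf() call print a register-sized value correctly on both the 32-bit chips and tilegx. The helper name below is illustrative, not taken from the kernel sources.

#include <stdio.h>
#include <arch/abi.h>

/* Print a named register value with the width-correct length modifier:
 * INT_REG_FMT is "l" on the 32-bit chips and "ll" when the word size
 * exceeds 32 bits, so the format string becomes "%lx" or "%llx". */
static void show_reg(const char *name, uint_reg_t val)
{
        printf("%s = 0x%" INT_REG_FMT "x\n", name, val);
}

A caller might use it as show_reg("sp", saved_sp) for some saved stack-pointer value; the same source compiles unchanged for either word size.
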
diff --git a/arch/tile/include/arch/chip.h b/arch/tile/include/arch/chip.h new file mode 100644 index 000000000000..926d3db0e91e --- /dev/null +++ b/arch/tile/include/arch/chip.h | |||
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #if __tile_chip__ == 0 | ||
16 | #include <arch/chip_tile64.h> | ||
17 | #elif __tile_chip__ == 1 | ||
18 | #include <arch/chip_tilepro.h> | ||
19 | #elif defined(__tilegx__) | ||
20 | #include <arch/chip_tilegx.h> | ||
21 | #else | ||
22 | #error Unexpected Tilera chip type | ||
23 | #endif | ||
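
To illustrate the dispatch above (a sketch, not part of the patch): code that includes <arch/chip.h> sees a uniform set of CHIP_* macros regardless of which per-chip header was chosen, and a native build can cross-check the selected header against the compiler's predefined chip symbol.

#include <arch/chip.h>

/* On a native non-tilegx build, the selected header's TILE_CHIP value
 * should agree with the compiler's notion of the target chip. */
#if defined(__tile_chip__) && !defined(__tilegx__) && (TILE_CHIP != __tile_chip__)
#error "arch/chip.h selected a header for the wrong chip"
#endif

/* Per-chip geometry is then available uniformly, e.g.: */
enum { L2_LINE_BYTES = CHIP_L2_LINE_SIZE() };
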
diff --git a/arch/tile/include/arch/chip_tile64.h b/arch/tile/include/arch/chip_tile64.h new file mode 100644 index 000000000000..18b5bc8e563f --- /dev/null +++ b/arch/tile/include/arch/chip_tile64.h | |||
@@ -0,0 +1,252 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * @file | ||
17 | * Global header file. | ||
18 | * This header file specifies defines for TILE64. | ||
19 | */ | ||
20 | |||
21 | #ifndef __ARCH_CHIP_H__ | ||
22 | #define __ARCH_CHIP_H__ | ||
23 | |||
24 | /** Specify chip version. | ||
25 | * When possible, prefer the CHIP_xxx symbols below for future-proofing. | ||
26 | * This is intended for cross-compiling; native compilation should | ||
27 | * use the predefined __tile_chip__ symbol. | ||
28 | */ | ||
29 | #define TILE_CHIP 0 | ||
30 | |||
31 | /** Specify chip revision. | ||
32 | * This provides for the case of a respin of a particular chip type; | ||
33 | * the normal value for this symbol is "0". | ||
34 | * This is intended for cross-compiling; native compilation should | ||
35 | * use the predefined __tile_chip_rev__ symbol. | ||
36 | */ | ||
37 | #define TILE_CHIP_REV 0 | ||
38 | |||
39 | /** The name of this architecture. */ | ||
40 | #define CHIP_ARCH_NAME "tile64" | ||
41 | |||
42 | /** The ELF e_machine type for binaries for this chip. */ | ||
43 | #define CHIP_ELF_TYPE() EM_TILE64 | ||
44 | |||
45 | /** The alternate ELF e_machine type for binaries for this chip. */ | ||
46 | #define CHIP_COMPAT_ELF_TYPE() 0x2506 | ||
47 | |||
48 | /** What is the native word size of the machine? */ | ||
49 | #define CHIP_WORD_SIZE() 32 | ||
50 | |||
51 | /** How many bits of a virtual address are used. Extra bits must be | ||
52 | * the sign extension of the low bits. | ||
53 | */ | ||
54 | #define CHIP_VA_WIDTH() 32 | ||
55 | |||
56 | /** How many bits are in a physical address? */ | ||
57 | #define CHIP_PA_WIDTH() 36 | ||
58 | |||
59 | /** Size of the L2 cache, in bytes. */ | ||
60 | #define CHIP_L2_CACHE_SIZE() 65536 | ||
61 | |||
62 | /** Log size of an L2 cache line in bytes. */ | ||
63 | #define CHIP_L2_LOG_LINE_SIZE() 6 | ||
64 | |||
65 | /** Size of an L2 cache line, in bytes. */ | ||
66 | #define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE()) | ||
67 | |||
68 | /** Associativity of the L2 cache. */ | ||
69 | #define CHIP_L2_ASSOC() 2 | ||
70 | |||
71 | /** Size of the L1 data cache, in bytes. */ | ||
72 | #define CHIP_L1D_CACHE_SIZE() 8192 | ||
73 | |||
74 | /** Log size of an L1 data cache line in bytes. */ | ||
75 | #define CHIP_L1D_LOG_LINE_SIZE() 4 | ||
76 | |||
77 | /** Size of an L1 data cache line, in bytes. */ | ||
78 | #define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE()) | ||
79 | |||
80 | /** Associativity of the L1 data cache. */ | ||
81 | #define CHIP_L1D_ASSOC() 2 | ||
82 | |||
83 | /** Size of the L1 instruction cache, in bytes. */ | ||
84 | #define CHIP_L1I_CACHE_SIZE() 8192 | ||
85 | |||
86 | /** Log size of an L1 instruction cache line in bytes. */ | ||
87 | #define CHIP_L1I_LOG_LINE_SIZE() 6 | ||
88 | |||
89 | /** Size of an L1 instruction cache line, in bytes. */ | ||
90 | #define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE()) | ||
91 | |||
92 | /** Associativity of the L1 instruction cache. */ | ||
93 | #define CHIP_L1I_ASSOC() 1 | ||
94 | |||
95 | /** Stride with which flush instructions must be issued. */ | ||
96 | #define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE() | ||
97 | |||
98 | /** Stride with which inv instructions must be issued. */ | ||
99 | #define CHIP_INV_STRIDE() CHIP_L1D_LINE_SIZE() | ||
100 | |||
101 | /** Stride with which finv instructions must be issued. */ | ||
102 | #define CHIP_FINV_STRIDE() CHIP_L1D_LINE_SIZE() | ||
103 | |||
104 | /** Can the local cache coherently cache data that is homed elsewhere? */ | ||
105 | #define CHIP_HAS_COHERENT_LOCAL_CACHE() 0 | ||
106 | |||
107 | /** How many simultaneous outstanding victims can the L2 cache have? */ | ||
108 | #define CHIP_MAX_OUTSTANDING_VICTIMS() 2 | ||
109 | |||
110 | /** Does the TLB support the NC and NOALLOC bits? */ | ||
111 | #define CHIP_HAS_NC_AND_NOALLOC_BITS() 0 | ||
112 | |||
113 | /** Does the chip support hash-for-home caching? */ | ||
114 | #define CHIP_HAS_CBOX_HOME_MAP() 0 | ||
115 | |||
116 | /** Number of entries in the chip's home map tables. */ | ||
117 | /* #define CHIP_CBOX_HOME_MAP_SIZE() -- does not apply to chip 0 */ | ||
118 | |||
119 | /** Do uncacheable requests miss in the cache regardless of whether | ||
120 | * there is matching data? */ | ||
121 | #define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 0 | ||
122 | |||
123 | /** Does the mf instruction wait for victims? */ | ||
124 | #define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 1 | ||
125 | |||
126 | /** Does the chip have an "inv" instruction that doesn't also flush? */ | ||
127 | #define CHIP_HAS_INV() 0 | ||
128 | |||
129 | /** Does the chip have a "wh64" instruction? */ | ||
130 | #define CHIP_HAS_WH64() 0 | ||
131 | |||
132 | /** Does this chip have a 'dword_align' instruction? */ | ||
133 | #define CHIP_HAS_DWORD_ALIGN() 0 | ||
134 | |||
135 | /** Number of performance counters. */ | ||
136 | #define CHIP_PERFORMANCE_COUNTERS() 2 | ||
137 | |||
138 | /** Does this chip have auxiliary performance counters? */ | ||
139 | #define CHIP_HAS_AUX_PERF_COUNTERS() 0 | ||
140 | |||
141 | /** Is the CBOX_MSR1 SPR supported? */ | ||
142 | #define CHIP_HAS_CBOX_MSR1() 0 | ||
143 | |||
144 | /** Is the TILE_RTF_HWM SPR supported? */ | ||
145 | #define CHIP_HAS_TILE_RTF_HWM() 0 | ||
146 | |||
147 | /** Is the TILE_WRITE_PENDING SPR supported? */ | ||
148 | #define CHIP_HAS_TILE_WRITE_PENDING() 0 | ||
149 | |||
150 | /** Is the PROC_STATUS SPR supported? */ | ||
151 | #define CHIP_HAS_PROC_STATUS_SPR() 0 | ||
152 | |||
153 | /** Log of the number of mshims we have. */ | ||
154 | #define CHIP_LOG_NUM_MSHIMS() 2 | ||
155 | |||
156 | /** Are the bases of the interrupt vector areas fixed? */ | ||
157 | #define CHIP_HAS_FIXED_INTVEC_BASE() 1 | ||
158 | |||
159 | /** Are the interrupt masks split up into 2 SPRs? */ | ||
160 | #define CHIP_HAS_SPLIT_INTR_MASK() 1 | ||
161 | |||
162 | /** Is the cycle count split up into 2 SPRs? */ | ||
163 | #define CHIP_HAS_SPLIT_CYCLE() 1 | ||
164 | |||
165 | /** Does the chip have a static network? */ | ||
166 | #define CHIP_HAS_SN() 1 | ||
167 | |||
168 | /** Does the chip have a static network processor? */ | ||
169 | #define CHIP_HAS_SN_PROC() 1 | ||
170 | |||
171 | /** Size of the L1 static network processor instruction cache, in bytes. */ | ||
172 | #define CHIP_L1SNI_CACHE_SIZE() 2048 | ||
173 | |||
174 | /** Does the chip have DMA support in each tile? */ | ||
175 | #define CHIP_HAS_TILE_DMA() 1 | ||
176 | |||
177 | /** Does the chip have the second revision of the directly accessible | ||
178 | * dynamic networks? This encapsulates a number of characteristics, | ||
179 | * including the absence of the catch-all, the absence of inline message | ||
180 | * tags, the absence of support for network context-switching, and so on. | ||
181 | */ | ||
182 | #define CHIP_HAS_REV1_XDN() 0 | ||
183 | |||
184 | /** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */ | ||
185 | #define CHIP_HAS_CMPEXCH() 0 | ||
186 | |||
187 | /** Does the chip have memory-mapped I/O support? */ | ||
188 | #define CHIP_HAS_MMIO() 0 | ||
189 | |||
190 | /** Does the chip have post-completion interrupts? */ | ||
191 | #define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0 | ||
192 | |||
193 | /** Does the chip have native single step support? */ | ||
194 | #define CHIP_HAS_SINGLE_STEP() 0 | ||
195 | |||
196 | #ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */ | ||
197 | |||
198 | /** How many entries are present in the instruction TLB? */ | ||
199 | #define CHIP_ITLB_ENTRIES() 8 | ||
200 | |||
201 | /** How many entries are present in the data TLB? */ | ||
202 | #define CHIP_DTLB_ENTRIES() 16 | ||
203 | |||
204 | /** How many MAF entries does the XAUI shim have? */ | ||
205 | #define CHIP_XAUI_MAF_ENTRIES() 16 | ||
206 | |||
207 | /** Does the memory shim have a source-id table? */ | ||
208 | #define CHIP_HAS_MSHIM_SRCID_TABLE() 1 | ||
209 | |||
210 | /** Does the L1 instruction cache clear on reset? */ | ||
211 | #define CHIP_HAS_L1I_CLEAR_ON_RESET() 0 | ||
212 | |||
213 | /** Does the chip come out of reset with valid coordinates on all tiles? | ||
214 | * Note that if defined, this also implies that the upper left is 1,1. | ||
215 | */ | ||
216 | #define CHIP_HAS_VALID_TILE_COORD_RESET() 0 | ||
217 | |||
218 | /** Does the chip have unified packet formats? */ | ||
219 | #define CHIP_HAS_UNIFIED_PACKET_FORMATS() 0 | ||
220 | |||
221 | /** Does the chip support write reordering? */ | ||
222 | #define CHIP_HAS_WRITE_REORDERING() 0 | ||
223 | |||
224 | /** Does the chip support Y-X routing as well as X-Y? */ | ||
225 | #define CHIP_HAS_Y_X_ROUTING() 0 | ||
226 | |||
227 | /** Is INTCTRL_3 managed with the correct MPL? */ | ||
228 | #define CHIP_HAS_INTCTRL_3_STATUS_FIX() 0 | ||
229 | |||
230 | /** Is it possible to configure the chip to be big-endian? */ | ||
231 | #define CHIP_HAS_BIG_ENDIAN_CONFIG() 0 | ||
232 | |||
233 | /** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */ | ||
234 | #define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0 | ||
235 | |||
236 | /** Is the DIAG_TRACE_WAY SPR supported? */ | ||
237 | #define CHIP_HAS_DIAG_TRACE_WAY() 0 | ||
238 | |||
239 | /** Is the MEM_STRIPE_CONFIG SPR supported? */ | ||
240 | #define CHIP_HAS_MEM_STRIPE_CONFIG() 0 | ||
241 | |||
242 | /** Are the TLB_PERF SPRs supported? */ | ||
243 | #define CHIP_HAS_TLB_PERF() 0 | ||
244 | |||
245 | /** Is the VDN_SNOOP_SHIM_CTL SPR supported? */ | ||
246 | #define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0 | ||
247 | |||
248 | /** Does the chip support rev1 DMA packets? */ | ||
249 | #define CHIP_HAS_REV1_DMA_PACKETS() 0 | ||
250 | |||
251 | #endif /* !__OPEN_SOURCE__ */ | ||
252 | #endif /* __ARCH_CHIP_H__ */ | ||
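
An illustrative sketch (not part of the patch) of how the cache-geometry macros above are typically consumed: alignment arithmetic written against CHIP_L2_LINE_SIZE() and CHIP_FLUSH_STRIDE() stays correct even if a future chip header changes the log line size. The macro and function names here are hypothetical.

#include <arch/chip.h>

/* Round an address down or up to an L2 cache line boundary. */
#define L2_ALIGN_DOWN(a)  ((a) & ~(unsigned long)(CHIP_L2_LINE_SIZE() - 1))
#define L2_ALIGN_UP(a)    L2_ALIGN_DOWN((a) + CHIP_L2_LINE_SIZE() - 1)

/* Number of flush operations needed to cover [start, start + len),
 * stepping by the stride the chip requires (CHIP_FLUSH_STRIDE()). */
static inline unsigned long flush_ops_for_range(unsigned long start,
                                                unsigned long len)
{
        unsigned long end = L2_ALIGN_UP(start + len);

        return (end - L2_ALIGN_DOWN(start)) / CHIP_FLUSH_STRIDE();
}
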
diff --git a/arch/tile/include/arch/chip_tilepro.h b/arch/tile/include/arch/chip_tilepro.h new file mode 100644 index 000000000000..9852af163862 --- /dev/null +++ b/arch/tile/include/arch/chip_tilepro.h | |||
@@ -0,0 +1,252 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * @file | ||
17 | * Global header file. | ||
18 | * This header file specifies defines for TILEPro. | ||
19 | */ | ||
20 | |||
21 | #ifndef __ARCH_CHIP_H__ | ||
22 | #define __ARCH_CHIP_H__ | ||
23 | |||
24 | /** Specify chip version. | ||
25 | * When possible, prefer the CHIP_xxx symbols below for future-proofing. | ||
26 | * This is intended for cross-compiling; native compilation should | ||
27 | * use the predefined __tile_chip__ symbol. | ||
28 | */ | ||
29 | #define TILE_CHIP 1 | ||
30 | |||
31 | /** Specify chip revision. | ||
32 | * This provides for the case of a respin of a particular chip type; | ||
33 | * the normal value for this symbol is "0". | ||
34 | * This is intended for cross-compiling; native compilation should | ||
35 | * use the predefined __tile_chip_rev__ symbol. | ||
36 | */ | ||
37 | #define TILE_CHIP_REV 0 | ||
38 | |||
39 | /** The name of this architecture. */ | ||
40 | #define CHIP_ARCH_NAME "tilepro" | ||
41 | |||
42 | /** The ELF e_machine type for binaries for this chip. */ | ||
43 | #define CHIP_ELF_TYPE() EM_TILEPRO | ||
44 | |||
45 | /** The alternate ELF e_machine type for binaries for this chip. */ | ||
46 | #define CHIP_COMPAT_ELF_TYPE() 0x2507 | ||
47 | |||
48 | /** What is the native word size of the machine? */ | ||
49 | #define CHIP_WORD_SIZE() 32 | ||
50 | |||
51 | /** How many bits of a virtual address are used. Extra bits must be | ||
52 | * the sign extension of the low bits. | ||
53 | */ | ||
54 | #define CHIP_VA_WIDTH() 32 | ||
55 | |||
56 | /** How many bits are in a physical address? */ | ||
57 | #define CHIP_PA_WIDTH() 36 | ||
58 | |||
59 | /** Size of the L2 cache, in bytes. */ | ||
60 | #define CHIP_L2_CACHE_SIZE() 65536 | ||
61 | |||
62 | /** Log size of an L2 cache line in bytes. */ | ||
63 | #define CHIP_L2_LOG_LINE_SIZE() 6 | ||
64 | |||
65 | /** Size of an L2 cache line, in bytes. */ | ||
66 | #define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE()) | ||
67 | |||
68 | /** Associativity of the L2 cache. */ | ||
69 | #define CHIP_L2_ASSOC() 4 | ||
70 | |||
71 | /** Size of the L1 data cache, in bytes. */ | ||
72 | #define CHIP_L1D_CACHE_SIZE() 8192 | ||
73 | |||
74 | /** Log size of an L1 data cache line in bytes. */ | ||
75 | #define CHIP_L1D_LOG_LINE_SIZE() 4 | ||
76 | |||
77 | /** Size of an L1 data cache line, in bytes. */ | ||
78 | #define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE()) | ||
79 | |||
80 | /** Associativity of the L1 data cache. */ | ||
81 | #define CHIP_L1D_ASSOC() 2 | ||
82 | |||
83 | /** Size of the L1 instruction cache, in bytes. */ | ||
84 | #define CHIP_L1I_CACHE_SIZE() 16384 | ||
85 | |||
86 | /** Log size of an L1 instruction cache line in bytes. */ | ||
87 | #define CHIP_L1I_LOG_LINE_SIZE() 6 | ||
88 | |||
89 | /** Size of an L1 instruction cache line, in bytes. */ | ||
90 | #define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE()) | ||
91 | |||
92 | /** Associativity of the L1 instruction cache. */ | ||
93 | #define CHIP_L1I_ASSOC() 1 | ||
94 | |||
95 | /** Stride with which flush instructions must be issued. */ | ||
96 | #define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE() | ||
97 | |||
98 | /** Stride with which inv instructions must be issued. */ | ||
99 | #define CHIP_INV_STRIDE() CHIP_L2_LINE_SIZE() | ||
100 | |||
101 | /** Stride with which finv instructions must be issued. */ | ||
102 | #define CHIP_FINV_STRIDE() CHIP_L2_LINE_SIZE() | ||
103 | |||
104 | /** Can the local cache coherently cache data that is homed elsewhere? */ | ||
105 | #define CHIP_HAS_COHERENT_LOCAL_CACHE() 1 | ||
106 | |||
107 | /** How many simultaneous outstanding victims can the L2 cache have? */ | ||
108 | #define CHIP_MAX_OUTSTANDING_VICTIMS() 4 | ||
109 | |||
110 | /** Does the TLB support the NC and NOALLOC bits? */ | ||
111 | #define CHIP_HAS_NC_AND_NOALLOC_BITS() 1 | ||
112 | |||
113 | /** Does the chip support hash-for-home caching? */ | ||
114 | #define CHIP_HAS_CBOX_HOME_MAP() 1 | ||
115 | |||
116 | /** Number of entries in the chip's home map tables. */ | ||
117 | #define CHIP_CBOX_HOME_MAP_SIZE() 64 | ||
118 | |||
119 | /** Do uncacheable requests miss in the cache regardless of whether | ||
120 | * there is matching data? */ | ||
121 | #define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 1 | ||
122 | |||
123 | /** Does the mf instruction wait for victims? */ | ||
124 | #define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 0 | ||
125 | |||
126 | /** Does the chip have an "inv" instruction that doesn't also flush? */ | ||
127 | #define CHIP_HAS_INV() 1 | ||
128 | |||
129 | /** Does the chip have a "wh64" instruction? */ | ||
130 | #define CHIP_HAS_WH64() 1 | ||
131 | |||
132 | /** Does this chip have a 'dword_align' instruction? */ | ||
133 | #define CHIP_HAS_DWORD_ALIGN() 1 | ||
134 | |||
135 | /** Number of performance counters. */ | ||
136 | #define CHIP_PERFORMANCE_COUNTERS() 4 | ||
137 | |||
138 | /** Does this chip have auxiliary performance counters? */ | ||
139 | #define CHIP_HAS_AUX_PERF_COUNTERS() 1 | ||
140 | |||
141 | /** Is the CBOX_MSR1 SPR supported? */ | ||
142 | #define CHIP_HAS_CBOX_MSR1() 1 | ||
143 | |||
144 | /** Is the TILE_RTF_HWM SPR supported? */ | ||
145 | #define CHIP_HAS_TILE_RTF_HWM() 1 | ||
146 | |||
147 | /** Is the TILE_WRITE_PENDING SPR supported? */ | ||
148 | #define CHIP_HAS_TILE_WRITE_PENDING() 1 | ||
149 | |||
150 | /** Is the PROC_STATUS SPR supported? */ | ||
151 | #define CHIP_HAS_PROC_STATUS_SPR() 1 | ||
152 | |||
153 | /** Log of the number of mshims we have. */ | ||
154 | #define CHIP_LOG_NUM_MSHIMS() 2 | ||
155 | |||
156 | /** Are the bases of the interrupt vector areas fixed? */ | ||
157 | #define CHIP_HAS_FIXED_INTVEC_BASE() 1 | ||
158 | |||
159 | /** Are the interrupt masks split up into 2 SPRs? */ | ||
160 | #define CHIP_HAS_SPLIT_INTR_MASK() 1 | ||
161 | |||
162 | /** Is the cycle count split up into 2 SPRs? */ | ||
163 | #define CHIP_HAS_SPLIT_CYCLE() 1 | ||
164 | |||
165 | /** Does the chip have a static network? */ | ||
166 | #define CHIP_HAS_SN() 1 | ||
167 | |||
168 | /** Does the chip have a static network processor? */ | ||
169 | #define CHIP_HAS_SN_PROC() 0 | ||
170 | |||
171 | /** Size of the L1 static network processor instruction cache, in bytes. */ | ||
172 | /* #define CHIP_L1SNI_CACHE_SIZE() -- does not apply to chip 1 */ | ||
173 | |||
174 | /** Does the chip have DMA support in each tile? */ | ||
175 | #define CHIP_HAS_TILE_DMA() 1 | ||
176 | |||
177 | /** Does the chip have the second revision of the directly accessible | ||
178 | * dynamic networks? This encapsulates a number of characteristics, | ||
179 | * including the absence of the catch-all, the absence of inline message | ||
180 | * tags, the absence of support for network context-switching, and so on. | ||
181 | */ | ||
182 | #define CHIP_HAS_REV1_XDN() 0 | ||
183 | |||
184 | /** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */ | ||
185 | #define CHIP_HAS_CMPEXCH() 0 | ||
186 | |||
187 | /** Does the chip have memory-mapped I/O support? */ | ||
188 | #define CHIP_HAS_MMIO() 0 | ||
189 | |||
190 | /** Does the chip have post-completion interrupts? */ | ||
191 | #define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0 | ||
192 | |||
193 | /** Does the chip have native single step support? */ | ||
194 | #define CHIP_HAS_SINGLE_STEP() 0 | ||
195 | |||
196 | #ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */ | ||
197 | |||
198 | /** How many entries are present in the instruction TLB? */ | ||
199 | #define CHIP_ITLB_ENTRIES() 16 | ||
200 | |||
201 | /** How many entries are present in the data TLB? */ | ||
202 | #define CHIP_DTLB_ENTRIES() 16 | ||
203 | |||
204 | /** How many MAF entries does the XAUI shim have? */ | ||
205 | #define CHIP_XAUI_MAF_ENTRIES() 32 | ||
206 | |||
207 | /** Does the memory shim have a source-id table? */ | ||
208 | #define CHIP_HAS_MSHIM_SRCID_TABLE() 0 | ||
209 | |||
210 | /** Does the L1 instruction cache clear on reset? */ | ||
211 | #define CHIP_HAS_L1I_CLEAR_ON_RESET() 1 | ||
212 | |||
213 | /** Does the chip come out of reset with valid coordinates on all tiles? | ||
214 | * Note that if defined, this also implies that the upper left is 1,1. | ||
215 | */ | ||
216 | #define CHIP_HAS_VALID_TILE_COORD_RESET() 1 | ||
217 | |||
218 | /** Does the chip have unified packet formats? */ | ||
219 | #define CHIP_HAS_UNIFIED_PACKET_FORMATS() 1 | ||
220 | |||
221 | /** Does the chip support write reordering? */ | ||
222 | #define CHIP_HAS_WRITE_REORDERING() 1 | ||
223 | |||
224 | /** Does the chip support Y-X routing as well as X-Y? */ | ||
225 | #define CHIP_HAS_Y_X_ROUTING() 1 | ||
226 | |||
227 | /** Is INTCTRL_3 managed with the correct MPL? */ | ||
228 | #define CHIP_HAS_INTCTRL_3_STATUS_FIX() 1 | ||
229 | |||
230 | /** Is it possible to configure the chip to be big-endian? */ | ||
231 | #define CHIP_HAS_BIG_ENDIAN_CONFIG() 1 | ||
232 | |||
233 | /** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */ | ||
234 | #define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 1 | ||
235 | |||
236 | /** Is the DIAG_TRACE_WAY SPR supported? */ | ||
237 | #define CHIP_HAS_DIAG_TRACE_WAY() 1 | ||
238 | |||
239 | /** Is the MEM_STRIPE_CONFIG SPR supported? */ | ||
240 | #define CHIP_HAS_MEM_STRIPE_CONFIG() 1 | ||
241 | |||
242 | /** Are the TLB_PERF SPRs supported? */ | ||
243 | #define CHIP_HAS_TLB_PERF() 1 | ||
244 | |||
245 | /** Is the VDN_SNOOP_SHIM_CTL SPR supported? */ | ||
246 | #define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 1 | ||
247 | |||
248 | /** Does the chip support rev1 DMA packets? */ | ||
249 | #define CHIP_HAS_REV1_DMA_PACKETS() 1 | ||
250 | |||
251 | #endif /* !__OPEN_SOURCE__ */ | ||
252 | #endif /* __ARCH_CHIP_H__ */ | ||
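
The CHIP_HAS_* predicates above (and their TILE64 counterparts) are meant for compile-time feature selection. A sketch of the pattern, with an illustrative macro name not taken from the patch:

#include <arch/chip.h>

#if CHIP_HAS_CBOX_HOME_MAP()
/* TILEPro: hash-for-home caching exists, so it can be the default policy. */
#define PAGE_HOME_DEFAULT_IS_HASH 1
#else
/* TILE64: no home-map table, so a page must be homed on a single tile. */
#define PAGE_HOME_DEFAULT_IS_HASH 0
#endif

Because both headers define the full set of predicates (as 0 or 1), callers use #if rather than #ifdef and never need to know which chip header was included.
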
diff --git a/arch/tile/include/arch/interrupts.h b/arch/tile/include/arch/interrupts.h new file mode 100644 index 000000000000..20f8f07d2de9 --- /dev/null +++ b/arch/tile/include/arch/interrupts.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifdef __tilegx__ | ||
16 | #include <arch/interrupts_64.h> | ||
17 | #else | ||
18 | #include <arch/interrupts_32.h> | ||
19 | #endif | ||
diff --git a/arch/tile/include/arch/interrupts_32.h b/arch/tile/include/arch/interrupts_32.h new file mode 100644 index 000000000000..feffada705f0 --- /dev/null +++ b/arch/tile/include/arch/interrupts_32.h | |||
@@ -0,0 +1,304 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef __ARCH_INTERRUPTS_H__ | ||
16 | #define __ARCH_INTERRUPTS_H__ | ||
17 | |||
18 | /** Mask for an interrupt. */ | ||
19 | #ifdef __ASSEMBLER__ | ||
20 | /* Note: must handle breaking interrupts into high and low words manually. */ | ||
21 | #define INT_MASK(intno) (1 << (intno)) | ||
22 | #else | ||
23 | #define INT_MASK(intno) (1ULL << (intno)) | ||
24 | #endif | ||
25 | |||
26 | |||
27 | /** Where a given interrupt executes. */ | ||
28 | #define INTERRUPT_VECTOR(i, pl) (0xFC000000 + ((pl) << 24) + ((i) << 8)) | ||
29 | |||
30 | /** Where to store a vector for a given interrupt. */ | ||
31 | #define USER_INTERRUPT_VECTOR(i) INTERRUPT_VECTOR(i, 0) | ||
32 | |||
33 | /** The base address of user-level interrupts. */ | ||
34 | #define USER_INTERRUPT_VECTOR_BASE INTERRUPT_VECTOR(0, 0) | ||
35 | |||
36 | |||
37 | /** Additional synthetic interrupt. */ | ||
38 | #define INT_BREAKPOINT (63) | ||
39 | |||
40 | #define INT_ITLB_MISS 0 | ||
41 | #define INT_MEM_ERROR 1 | ||
42 | #define INT_ILL 2 | ||
43 | #define INT_GPV 3 | ||
44 | #define INT_SN_ACCESS 4 | ||
45 | #define INT_IDN_ACCESS 5 | ||
46 | #define INT_UDN_ACCESS 6 | ||
47 | #define INT_IDN_REFILL 7 | ||
48 | #define INT_UDN_REFILL 8 | ||
49 | #define INT_IDN_COMPLETE 9 | ||
50 | #define INT_UDN_COMPLETE 10 | ||
51 | #define INT_SWINT_3 11 | ||
52 | #define INT_SWINT_2 12 | ||
53 | #define INT_SWINT_1 13 | ||
54 | #define INT_SWINT_0 14 | ||
55 | #define INT_UNALIGN_DATA 15 | ||
56 | #define INT_DTLB_MISS 16 | ||
57 | #define INT_DTLB_ACCESS 17 | ||
58 | #define INT_DMATLB_MISS 18 | ||
59 | #define INT_DMATLB_ACCESS 19 | ||
60 | #define INT_SNITLB_MISS 20 | ||
61 | #define INT_SN_NOTIFY 21 | ||
62 | #define INT_SN_FIREWALL 22 | ||
63 | #define INT_IDN_FIREWALL 23 | ||
64 | #define INT_UDN_FIREWALL 24 | ||
65 | #define INT_TILE_TIMER 25 | ||
66 | #define INT_IDN_TIMER 26 | ||
67 | #define INT_UDN_TIMER 27 | ||
68 | #define INT_DMA_NOTIFY 28 | ||
69 | #define INT_IDN_CA 29 | ||
70 | #define INT_UDN_CA 30 | ||
71 | #define INT_IDN_AVAIL 31 | ||
72 | #define INT_UDN_AVAIL 32 | ||
73 | #define INT_PERF_COUNT 33 | ||
74 | #define INT_INTCTRL_3 34 | ||
75 | #define INT_INTCTRL_2 35 | ||
76 | #define INT_INTCTRL_1 36 | ||
77 | #define INT_INTCTRL_0 37 | ||
78 | #define INT_BOOT_ACCESS 38 | ||
79 | #define INT_WORLD_ACCESS 39 | ||
80 | #define INT_I_ASID 40 | ||
81 | #define INT_D_ASID 41 | ||
82 | #define INT_DMA_ASID 42 | ||
83 | #define INT_SNI_ASID 43 | ||
84 | #define INT_DMA_CPL 44 | ||
85 | #define INT_SN_CPL 45 | ||
86 | #define INT_DOUBLE_FAULT 46 | ||
87 | #define INT_SN_STATIC_ACCESS 47 | ||
88 | #define INT_AUX_PERF_COUNT 48 | ||
89 | |||
90 | #define NUM_INTERRUPTS 49 | ||
91 | |||
92 | #define QUEUED_INTERRUPTS ( \ | ||
93 | INT_MASK(INT_MEM_ERROR) | \ | ||
94 | INT_MASK(INT_DMATLB_MISS) | \ | ||
95 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
96 | INT_MASK(INT_SNITLB_MISS) | \ | ||
97 | INT_MASK(INT_SN_NOTIFY) | \ | ||
98 | INT_MASK(INT_SN_FIREWALL) | \ | ||
99 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
100 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
101 | INT_MASK(INT_TILE_TIMER) | \ | ||
102 | INT_MASK(INT_IDN_TIMER) | \ | ||
103 | INT_MASK(INT_UDN_TIMER) | \ | ||
104 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
105 | INT_MASK(INT_IDN_CA) | \ | ||
106 | INT_MASK(INT_UDN_CA) | \ | ||
107 | INT_MASK(INT_IDN_AVAIL) | \ | ||
108 | INT_MASK(INT_UDN_AVAIL) | \ | ||
109 | INT_MASK(INT_PERF_COUNT) | \ | ||
110 | INT_MASK(INT_INTCTRL_3) | \ | ||
111 | INT_MASK(INT_INTCTRL_2) | \ | ||
112 | INT_MASK(INT_INTCTRL_1) | \ | ||
113 | INT_MASK(INT_INTCTRL_0) | \ | ||
114 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
115 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
116 | INT_MASK(INT_I_ASID) | \ | ||
117 | INT_MASK(INT_D_ASID) | \ | ||
118 | INT_MASK(INT_DMA_ASID) | \ | ||
119 | INT_MASK(INT_SNI_ASID) | \ | ||
120 | INT_MASK(INT_DMA_CPL) | \ | ||
121 | INT_MASK(INT_SN_CPL) | \ | ||
122 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
123 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
124 | 0) | ||
125 | #define NONQUEUED_INTERRUPTS ( \ | ||
126 | INT_MASK(INT_ITLB_MISS) | \ | ||
127 | INT_MASK(INT_ILL) | \ | ||
128 | INT_MASK(INT_GPV) | \ | ||
129 | INT_MASK(INT_SN_ACCESS) | \ | ||
130 | INT_MASK(INT_IDN_ACCESS) | \ | ||
131 | INT_MASK(INT_UDN_ACCESS) | \ | ||
132 | INT_MASK(INT_IDN_REFILL) | \ | ||
133 | INT_MASK(INT_UDN_REFILL) | \ | ||
134 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
135 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
136 | INT_MASK(INT_SWINT_3) | \ | ||
137 | INT_MASK(INT_SWINT_2) | \ | ||
138 | INT_MASK(INT_SWINT_1) | \ | ||
139 | INT_MASK(INT_SWINT_0) | \ | ||
140 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
141 | INT_MASK(INT_DTLB_MISS) | \ | ||
142 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
143 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
144 | 0) | ||
145 | #define CRITICAL_MASKED_INTERRUPTS ( \ | ||
146 | INT_MASK(INT_MEM_ERROR) | \ | ||
147 | INT_MASK(INT_DMATLB_MISS) | \ | ||
148 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
149 | INT_MASK(INT_SNITLB_MISS) | \ | ||
150 | INT_MASK(INT_SN_NOTIFY) | \ | ||
151 | INT_MASK(INT_SN_FIREWALL) | \ | ||
152 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
153 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
154 | INT_MASK(INT_TILE_TIMER) | \ | ||
155 | INT_MASK(INT_IDN_TIMER) | \ | ||
156 | INT_MASK(INT_UDN_TIMER) | \ | ||
157 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
158 | INT_MASK(INT_IDN_CA) | \ | ||
159 | INT_MASK(INT_UDN_CA) | \ | ||
160 | INT_MASK(INT_IDN_AVAIL) | \ | ||
161 | INT_MASK(INT_UDN_AVAIL) | \ | ||
162 | INT_MASK(INT_PERF_COUNT) | \ | ||
163 | INT_MASK(INT_INTCTRL_3) | \ | ||
164 | INT_MASK(INT_INTCTRL_2) | \ | ||
165 | INT_MASK(INT_INTCTRL_1) | \ | ||
166 | INT_MASK(INT_INTCTRL_0) | \ | ||
167 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
168 | 0) | ||
169 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | ||
170 | INT_MASK(INT_ITLB_MISS) | \ | ||
171 | INT_MASK(INT_ILL) | \ | ||
172 | INT_MASK(INT_GPV) | \ | ||
173 | INT_MASK(INT_SN_ACCESS) | \ | ||
174 | INT_MASK(INT_IDN_ACCESS) | \ | ||
175 | INT_MASK(INT_UDN_ACCESS) | \ | ||
176 | INT_MASK(INT_IDN_REFILL) | \ | ||
177 | INT_MASK(INT_UDN_REFILL) | \ | ||
178 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
179 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
180 | INT_MASK(INT_SWINT_3) | \ | ||
181 | INT_MASK(INT_SWINT_2) | \ | ||
182 | INT_MASK(INT_SWINT_1) | \ | ||
183 | INT_MASK(INT_SWINT_0) | \ | ||
184 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
185 | INT_MASK(INT_DTLB_MISS) | \ | ||
186 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
187 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
188 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
189 | INT_MASK(INT_I_ASID) | \ | ||
190 | INT_MASK(INT_D_ASID) | \ | ||
191 | INT_MASK(INT_DMA_ASID) | \ | ||
192 | INT_MASK(INT_SNI_ASID) | \ | ||
193 | INT_MASK(INT_DMA_CPL) | \ | ||
194 | INT_MASK(INT_SN_CPL) | \ | ||
195 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
196 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
197 | 0) | ||
198 | #define MASKABLE_INTERRUPTS ( \ | ||
199 | INT_MASK(INT_MEM_ERROR) | \ | ||
200 | INT_MASK(INT_IDN_REFILL) | \ | ||
201 | INT_MASK(INT_UDN_REFILL) | \ | ||
202 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
203 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
204 | INT_MASK(INT_DMATLB_MISS) | \ | ||
205 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
206 | INT_MASK(INT_SNITLB_MISS) | \ | ||
207 | INT_MASK(INT_SN_NOTIFY) | \ | ||
208 | INT_MASK(INT_SN_FIREWALL) | \ | ||
209 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
210 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
211 | INT_MASK(INT_TILE_TIMER) | \ | ||
212 | INT_MASK(INT_IDN_TIMER) | \ | ||
213 | INT_MASK(INT_UDN_TIMER) | \ | ||
214 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
215 | INT_MASK(INT_IDN_CA) | \ | ||
216 | INT_MASK(INT_UDN_CA) | \ | ||
217 | INT_MASK(INT_IDN_AVAIL) | \ | ||
218 | INT_MASK(INT_UDN_AVAIL) | \ | ||
219 | INT_MASK(INT_PERF_COUNT) | \ | ||
220 | INT_MASK(INT_INTCTRL_3) | \ | ||
221 | INT_MASK(INT_INTCTRL_2) | \ | ||
222 | INT_MASK(INT_INTCTRL_1) | \ | ||
223 | INT_MASK(INT_INTCTRL_0) | \ | ||
224 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
225 | 0) | ||
226 | #define UNMASKABLE_INTERRUPTS ( \ | ||
227 | INT_MASK(INT_ITLB_MISS) | \ | ||
228 | INT_MASK(INT_ILL) | \ | ||
229 | INT_MASK(INT_GPV) | \ | ||
230 | INT_MASK(INT_SN_ACCESS) | \ | ||
231 | INT_MASK(INT_IDN_ACCESS) | \ | ||
232 | INT_MASK(INT_UDN_ACCESS) | \ | ||
233 | INT_MASK(INT_SWINT_3) | \ | ||
234 | INT_MASK(INT_SWINT_2) | \ | ||
235 | INT_MASK(INT_SWINT_1) | \ | ||
236 | INT_MASK(INT_SWINT_0) | \ | ||
237 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
238 | INT_MASK(INT_DTLB_MISS) | \ | ||
239 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
240 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
241 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
242 | INT_MASK(INT_I_ASID) | \ | ||
243 | INT_MASK(INT_D_ASID) | \ | ||
244 | INT_MASK(INT_DMA_ASID) | \ | ||
245 | INT_MASK(INT_SNI_ASID) | \ | ||
246 | INT_MASK(INT_DMA_CPL) | \ | ||
247 | INT_MASK(INT_SN_CPL) | \ | ||
248 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
249 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
250 | 0) | ||
251 | #define SYNC_INTERRUPTS ( \ | ||
252 | INT_MASK(INT_ITLB_MISS) | \ | ||
253 | INT_MASK(INT_ILL) | \ | ||
254 | INT_MASK(INT_GPV) | \ | ||
255 | INT_MASK(INT_SN_ACCESS) | \ | ||
256 | INT_MASK(INT_IDN_ACCESS) | \ | ||
257 | INT_MASK(INT_UDN_ACCESS) | \ | ||
258 | INT_MASK(INT_IDN_REFILL) | \ | ||
259 | INT_MASK(INT_UDN_REFILL) | \ | ||
260 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
261 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
262 | INT_MASK(INT_SWINT_3) | \ | ||
263 | INT_MASK(INT_SWINT_2) | \ | ||
264 | INT_MASK(INT_SWINT_1) | \ | ||
265 | INT_MASK(INT_SWINT_0) | \ | ||
266 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
267 | INT_MASK(INT_DTLB_MISS) | \ | ||
268 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
269 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
270 | 0) | ||
271 | #define NON_SYNC_INTERRUPTS ( \ | ||
272 | INT_MASK(INT_MEM_ERROR) | \ | ||
273 | INT_MASK(INT_DMATLB_MISS) | \ | ||
274 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
275 | INT_MASK(INT_SNITLB_MISS) | \ | ||
276 | INT_MASK(INT_SN_NOTIFY) | \ | ||
277 | INT_MASK(INT_SN_FIREWALL) | \ | ||
278 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
279 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
280 | INT_MASK(INT_TILE_TIMER) | \ | ||
281 | INT_MASK(INT_IDN_TIMER) | \ | ||
282 | INT_MASK(INT_UDN_TIMER) | \ | ||
283 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
284 | INT_MASK(INT_IDN_CA) | \ | ||
285 | INT_MASK(INT_UDN_CA) | \ | ||
286 | INT_MASK(INT_IDN_AVAIL) | \ | ||
287 | INT_MASK(INT_UDN_AVAIL) | \ | ||
288 | INT_MASK(INT_PERF_COUNT) | \ | ||
289 | INT_MASK(INT_INTCTRL_3) | \ | ||
290 | INT_MASK(INT_INTCTRL_2) | \ | ||
291 | INT_MASK(INT_INTCTRL_1) | \ | ||
292 | INT_MASK(INT_INTCTRL_0) | \ | ||
293 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
294 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
295 | INT_MASK(INT_I_ASID) | \ | ||
296 | INT_MASK(INT_D_ASID) | \ | ||
297 | INT_MASK(INT_DMA_ASID) | \ | ||
298 | INT_MASK(INT_SNI_ASID) | \ | ||
299 | INT_MASK(INT_DMA_CPL) | \ | ||
300 | INT_MASK(INT_SN_CPL) | \ | ||
301 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
302 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
303 | 0) | ||
304 | #endif // !__ARCH_INTERRUPTS_H__ | ||
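
A brief sketch (editorial, not from the patch) of how the masks in the 32-bit header above are consumed from C. The note at the top of the file about breaking masks into high and low words for assembly corresponds to the hi/lo helpers shown here; the helper names are illustrative.

#include <arch/interrupts.h>

/* True if the given interrupt number can be masked. */
static inline int int_is_maskable(int intno)
{
        return (MASKABLE_INTERRUPTS & INT_MASK(intno)) != 0;
}

/* On these chips the 64-bit mask lives in two 32-bit SPR words
 * (see CHIP_HAS_SPLIT_INTR_MASK() in the chip headers). */
static inline unsigned int int_mask_lo(unsigned long long mask)
{
        return (unsigned int)(mask & 0xffffffffULL);
}

static inline unsigned int int_mask_hi(unsigned long long mask)
{
        return (unsigned int)(mask >> 32);
}
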
diff --git a/arch/tile/include/arch/sim_def.h b/arch/tile/include/arch/sim_def.h new file mode 100644 index 000000000000..6418fbde063e --- /dev/null +++ b/arch/tile/include/arch/sim_def.h | |||
@@ -0,0 +1,512 @@ | |||
1 | // Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
2 | // | ||
3 | // This program is free software; you can redistribute it and/or | ||
4 | // modify it under the terms of the GNU General Public License | ||
5 | // as published by the Free Software Foundation, version 2. | ||
6 | // | ||
7 | // This program is distributed in the hope that it will be useful, but | ||
8 | // WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | // MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
10 | // NON INFRINGEMENT. See the GNU General Public License for | ||
11 | // more details. | ||
12 | |||
13 | //! @file | ||
14 | //! | ||
15 | //! Some low-level simulator definitions. | ||
16 | //! | ||
17 | |||
18 | #ifndef __ARCH_SIM_DEF_H__ | ||
19 | #define __ARCH_SIM_DEF_H__ | ||
20 | |||
21 | |||
22 | //! Internal: the low bits of the SIM_CONTROL_* SPR values specify | ||
23 | //! the operation to perform, and the remaining bits are | ||
24 | //! an operation-specific parameter (often unused). | ||
25 | //! | ||
26 | #define _SIM_CONTROL_OPERATOR_BITS 8 | ||
27 | |||
28 | |||
29 | //== Values which can be written to SPR_SIM_CONTROL. | ||
30 | |||
31 | //! If written to SPR_SIM_CONTROL, stops profiling. | ||
32 | //! | ||
33 | #define SIM_CONTROL_PROFILER_DISABLE 0 | ||
34 | |||
35 | //! If written to SPR_SIM_CONTROL, starts profiling. | ||
36 | //! | ||
37 | #define SIM_CONTROL_PROFILER_ENABLE 1 | ||
38 | |||
39 | //! If written to SPR_SIM_CONTROL, clears profiling counters. | ||
40 | //! | ||
41 | #define SIM_CONTROL_PROFILER_CLEAR 2 | ||
42 | |||
43 | //! If written to SPR_SIM_CONTROL, checkpoints the simulator. | ||
44 | //! | ||
45 | #define SIM_CONTROL_CHECKPOINT 3 | ||
46 | |||
47 | //! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8), | ||
48 | //! sets the tracing mask to the given mask. See "sim_set_tracing()". | ||
49 | //! | ||
50 | #define SIM_CONTROL_SET_TRACING 4 | ||
51 | |||
52 | //! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8), | ||
53 | //! dumps the requested items of machine state to the log. | ||
54 | //! | ||
55 | #define SIM_CONTROL_DUMP 5 | ||
56 | |||
57 | //! If written to SPR_SIM_CONTROL, clears chip-level profiling counters. | ||
58 | //! | ||
59 | #define SIM_CONTROL_PROFILER_CHIP_CLEAR 6 | ||
60 | |||
61 | //! If written to SPR_SIM_CONTROL, disables chip-level profiling. | ||
62 | //! | ||
63 | #define SIM_CONTROL_PROFILER_CHIP_DISABLE 7 | ||
64 | |||
65 | //! If written to SPR_SIM_CONTROL, enables chip-level profiling. | ||
66 | //! | ||
67 | #define SIM_CONTROL_PROFILER_CHIP_ENABLE 8 | ||
68 | |||
69 | //! If written to SPR_SIM_CONTROL, enables chip-level functional mode. | ||
70 | //! | ||
71 | #define SIM_CONTROL_ENABLE_FUNCTIONAL 9 | ||
72 | |||
73 | //! If written to SPR_SIM_CONTROL, disables chip-level functional mode. | ||
74 | //! | ||
75 | #define SIM_CONTROL_DISABLE_FUNCTIONAL 10 | ||
76 | |||
77 | //! If written to SPR_SIM_CONTROL, enables chip-level functional mode. | ||
78 | //! All tiles must perform this write for functional mode to be enabled. | ||
79 | //! Ignored in naked boot mode unless --functional is specified. | ||
80 | //! WARNING: Only the hypervisor startup code should use this! | ||
81 | //! | ||
82 | #define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11 | ||
83 | |||
84 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
85 | //! writes a string directly to the simulator output. Written to once for | ||
86 | //! each character in the string, plus a final NUL. Instead of NUL, | ||
87 | //! you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY". | ||
88 | //! | ||
89 | // ISSUE: Document the meaning of "newline", and the handling of NUL. | ||
90 | // | ||
91 | #define SIM_CONTROL_PUTC 12 | ||
92 | |||
93 | //! If written to SPR_SIM_CONTROL, clears the --grind-coherence state for | ||
94 | //! this core. This is intended to be used before a loop that will | ||
95 | //! invalidate the cache by loading new data and evicting all current data. | ||
96 | //! Generally speaking, this API should only be used by system code. | ||
97 | //! | ||
98 | #define SIM_CONTROL_GRINDER_CLEAR 13 | ||
99 | |||
100 | //! If written to SPR_SIM_CONTROL, shuts down the simulator. | ||
101 | //! | ||
102 | #define SIM_CONTROL_SHUTDOWN 14 | ||
103 | |||
104 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
105 | //! indicates that a fork syscall just created the given process. | ||
106 | //! | ||
107 | #define SIM_CONTROL_OS_FORK 15 | ||
108 | |||
109 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
110 | //! indicates that an exit syscall was just executed by the given process. | ||
111 | //! | ||
112 | #define SIM_CONTROL_OS_EXIT 16 | ||
113 | |||
114 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
115 | //! indicates that the OS just switched to the given process. | ||
116 | //! | ||
117 | #define SIM_CONTROL_OS_SWITCH 17 | ||
118 | |||
119 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
120 | //! indicates that an exec syscall was just executed. Written to once for | ||
121 | //! each character in the executable name, plus a final NUL. | ||
122 | //! | ||
123 | #define SIM_CONTROL_OS_EXEC 18 | ||
124 | |||
125 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
126 | //! indicates that an interpreter (PT_INTERP) was loaded. Written to once | ||
127 | //! for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a | ||
128 | //! hex load address starting with "0x", and "PATH" is the executable name. | ||
129 | //! | ||
130 | #define SIM_CONTROL_OS_INTERP 19 | ||
131 | |||
132 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
133 | //! indicates that a dll was loaded. Written to once for each character | ||
134 | //! in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load | ||
135 | //! address starting with "0x", and "PATH" is the executable name. | ||
136 | //! | ||
137 | #define SIM_CONTROL_DLOPEN 20 | ||
138 | |||
139 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
140 | //! indicates that a dll was unloaded. Written to once for each character | ||
141 | //! in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load | ||
142 | //! address starting with "0x". | ||
143 | //! | ||
144 | #define SIM_CONTROL_DLCLOSE 21 | ||
145 | |||
146 | //! If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8), | ||
147 | //! indicates whether to allow data reads to remotely-cached | ||
148 | //! dirty cache lines to be cached locally without grinder warnings or | ||
149 | //! assertions (used by Linux kernel fast memcpy). | ||
150 | //! | ||
151 | #define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22 | ||
152 | |||
153 | //! If written to SPR_SIM_CONTROL, enables memory tracing. | ||
154 | //! | ||
155 | #define SIM_CONTROL_ENABLE_MEM_LOGGING 23 | ||
156 | |||
157 | //! If written to SPR_SIM_CONTROL, disables memory tracing. | ||
158 | //! | ||
159 | #define SIM_CONTROL_DISABLE_MEM_LOGGING 24 | ||
160 | |||
161 | //! If written to SPR_SIM_CONTROL, changes the shaping parameters of one of | ||
162 | //! the gbe or xgbe shims. Must specify the shim id, the type, the units, and | ||
163 | //! the rate, as defined in SIM_SHAPING_SPR_ARG. | ||
164 | //! | ||
165 | #define SIM_CONTROL_SHAPING 25 | ||
166 | |||
167 | //! If written to SPR_SIM_CONTROL, combined with character (shifted by 8), | ||
168 | //! requests that a simulator command be executed. Written to once for each | ||
169 | //! character in the command, plus a final NUL. | ||
170 | //! | ||
171 | #define SIM_CONTROL_COMMAND 26 | ||
172 | |||
173 | //! If written to SPR_SIM_CONTROL, indicates that the simulated system | ||
174 | //! is panicking, to allow debugging via --debug-on-panic. | ||
175 | //! | ||
176 | #define SIM_CONTROL_PANIC 27 | ||
177 | |||
178 | //! If written to SPR_SIM_CONTROL, triggers a simulator syscall. | ||
179 | //! See "sim_syscall()" for more info. | ||
180 | //! | ||
181 | #define SIM_CONTROL_SYSCALL 32 | ||
182 | |||
183 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
184 | //! provides the pid that subsequent SIM_CONTROL_OS_FORK writes should | ||
185 | //! use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH. | ||
186 | //! | ||
187 | #define SIM_CONTROL_OS_FORK_PARENT 33 | ||
188 | |||
189 | //! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number | ||
190 | //! (shifted by 8), clears the pending magic data section. The cleared | ||
191 | //! pending magic data section and any subsequently appended magic bytes | ||
192 | //! will only take effect when the classifier blast programmer is run. | ||
193 | #define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34 | ||
194 | |||
195 | //! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number | ||
196 | //! (shifted by 8) and a byte of data (shifted by 16), appends that byte | ||
197 | //! to the shim's pending magic data section. The pending magic data | ||
198 | //! section takes effect when the classifier blast programmer is run. | ||
199 | #define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35 | ||
200 | |||
201 | //! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number | ||
202 | //! (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a | ||
203 | //! mask of links (shifted by 32), enable or disable the corresponding | ||
204 | //! mPIPE links. | ||
205 | #define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36 | ||
206 | |||
207 | //== Syscall numbers for use with "sim_syscall()". | ||
208 | |||
209 | //! Syscall number for sim_add_watchpoint(). | ||
210 | //! | ||
211 | #define SIM_SYSCALL_ADD_WATCHPOINT 2 | ||
212 | |||
213 | //! Syscall number for sim_remove_watchpoint(). | ||
214 | //! | ||
215 | #define SIM_SYSCALL_REMOVE_WATCHPOINT 3 | ||
216 | |||
217 | //! Syscall number for sim_query_watchpoint(). | ||
218 | //! | ||
219 | #define SIM_SYSCALL_QUERY_WATCHPOINT 4 | ||
220 | |||
221 | //! Syscall number that asserts that the cache lines starting at the | ||
222 | //! 64-bit PA passed as the second argument to sim_syscall(), over the | ||
223 | //! range passed as the third argument, are no longer in cache. | ||
224 | //! The simulator raises an error if this is not the case. | ||
225 | //! | ||
226 | #define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5 | ||
227 | |||
228 | |||
229 | //== Bit masks which can be shifted by 8, combined with | ||
230 | //== SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL. | ||
231 | |||
232 | //! @addtogroup arch_sim | ||
233 | //! @{ | ||
234 | |||
235 | //! Enable --trace-cycle when passed to simulator_set_tracing(). | ||
236 | //! | ||
237 | #define SIM_TRACE_CYCLES 0x01 | ||
238 | |||
239 | //! Enable --trace-router when passed to simulator_set_tracing(). | ||
240 | //! | ||
241 | #define SIM_TRACE_ROUTER 0x02 | ||
242 | |||
243 | //! Enable --trace-register-writes when passed to simulator_set_tracing(). | ||
244 | //! | ||
245 | #define SIM_TRACE_REGISTER_WRITES 0x04 | ||
246 | |||
247 | //! Enable --trace-disasm when passed to simulator_set_tracing(). | ||
248 | //! | ||
249 | #define SIM_TRACE_DISASM 0x08 | ||
250 | |||
251 | //! Enable --trace-stall-info when passed to simulator_set_tracing(). | ||
252 | //! | ||
253 | #define SIM_TRACE_STALL_INFO 0x10 | ||
254 | |||
255 | //! Enable --trace-memory-controller when passed to simulator_set_tracing(). | ||
256 | //! | ||
257 | #define SIM_TRACE_MEMORY_CONTROLLER 0x20 | ||
258 | |||
259 | //! Enable --trace-l2 when passed to simulator_set_tracing(). | ||
260 | //! | ||
261 | #define SIM_TRACE_L2_CACHE 0x40 | ||
262 | |||
263 | //! Enable --trace-lines when passed to simulator_set_tracing(). | ||
264 | //! | ||
265 | #define SIM_TRACE_LINES 0x80 | ||
266 | |||
267 | //! Turn off all tracing when passed to simulator_set_tracing(). | ||
268 | //! | ||
269 | #define SIM_TRACE_NONE 0 | ||
270 | |||
271 | //! Turn on all tracing when passed to simulator_set_tracing(). | ||
272 | //! | ||
273 | #define SIM_TRACE_ALL (-1) | ||
274 | |||
275 | //! @} | ||
276 | |||
277 | //! Computes the value to write to SPR_SIM_CONTROL to set tracing flags. | ||
278 | //! | ||
279 | #define SIM_TRACE_SPR_ARG(mask) \ | ||
280 | (SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
281 | |||
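As a hedged usage sketch (not part of this patch), assuming the tile __insn_mtspr() intrinsic and SPR_SIM_CONTROL from <arch/spr_def.h>; the helper name is illustrative.

/* Sketch: enable --trace-disasm and --trace-stall-info in the simulator. */
static inline void sim_trace_disasm_and_stalls(void)
{
	__insn_mtspr(SPR_SIM_CONTROL,
		     SIM_TRACE_SPR_ARG(SIM_TRACE_DISASM | SIM_TRACE_STALL_INFO));
}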
282 | |||
283 | //== Bit masks which can be shifted by 8, combined with | ||
284 | //== SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL. | ||
285 | |||
286 | //! @addtogroup arch_sim | ||
287 | //! @{ | ||
288 | |||
289 | //! Dump the general-purpose registers. | ||
290 | //! | ||
291 | #define SIM_DUMP_REGS 0x001 | ||
292 | |||
293 | //! Dump the SPRs. | ||
294 | //! | ||
295 | #define SIM_DUMP_SPRS 0x002 | ||
296 | |||
297 | //! Dump the ITLB. | ||
298 | //! | ||
299 | #define SIM_DUMP_ITLB 0x004 | ||
300 | |||
301 | //! Dump the DTLB. | ||
302 | //! | ||
303 | #define SIM_DUMP_DTLB 0x008 | ||
304 | |||
305 | //! Dump the L1 I-cache. | ||
306 | //! | ||
307 | #define SIM_DUMP_L1I 0x010 | ||
308 | |||
309 | //! Dump the L1 D-cache. | ||
310 | //! | ||
311 | #define SIM_DUMP_L1D 0x020 | ||
312 | |||
313 | //! Dump the L2 cache. | ||
314 | //! | ||
315 | #define SIM_DUMP_L2 0x040 | ||
316 | |||
317 | //! Dump the switch registers. | ||
318 | //! | ||
319 | #define SIM_DUMP_SNREGS 0x080 | ||
320 | |||
321 | //! Dump the switch ITLB. | ||
322 | //! | ||
323 | #define SIM_DUMP_SNITLB 0x100 | ||
324 | |||
325 | //! Dump the switch L1 I-cache. | ||
326 | //! | ||
327 | #define SIM_DUMP_SNL1I 0x200 | ||
328 | |||
329 | //! Dump the current backtrace. | ||
330 | //! | ||
331 | #define SIM_DUMP_BACKTRACE 0x400 | ||
332 | |||
333 | //! Only dump valid lines in caches. | ||
334 | //! | ||
335 | #define SIM_DUMP_VALID_LINES 0x800 | ||
336 | |||
337 | //! Dump everything that is dumpable. | ||
338 | //! | ||
339 | #define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES) | ||
340 | |||
341 | //! @} | ||
342 | |||
343 | //! Computes the value to write to SPR_SIM_CONTROL to dump machine state. | ||
344 | //! | ||
345 | #define SIM_DUMP_SPR_ARG(mask) \ | ||
346 | (SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
347 | |||
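A similar hedged sketch for dumping machine state (illustrative name; again assumes __insn_mtspr() and SPR_SIM_CONTROL):

/* Sketch: ask the simulator to dump the registers and a backtrace. */
static inline void sim_dump_regs_and_backtrace(void)
{
	__insn_mtspr(SPR_SIM_CONTROL,
		     SIM_DUMP_SPR_ARG(SIM_DUMP_REGS | SIM_DUMP_BACKTRACE));
}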
348 | |||
349 | //== Bit masks which can be shifted by 8, combined with | ||
350 | //== SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL. | ||
351 | |||
352 | //! @addtogroup arch_sim | ||
353 | //! @{ | ||
354 | |||
355 | //! Use with SIM_PROFILER_CHIP_xxx to control the memory controllers. | ||
356 | //! | ||
357 | #define SIM_CHIP_MEMCTL 0x001 | ||
358 | |||
359 | //! Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface. | ||
360 | //! | ||
361 | #define SIM_CHIP_XAUI 0x002 | ||
362 | |||
363 | //! Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface. | ||
364 | //! | ||
365 | #define SIM_CHIP_PCIE 0x004 | ||
366 | |||
367 | //! Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. | ||
368 | //! | ||
369 | #define SIM_CHIP_MPIPE 0x008 | ||
370 | |||
371 | //! Reference all chip devices. | ||
372 | //! | ||
373 | #define SIM_CHIP_ALL (-1) | ||
374 | |||
375 | //! @} | ||
376 | |||
377 | //! Computes the value to write to SPR_SIM_CONTROL to clear chip statistics. | ||
378 | //! | ||
379 | #define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \ | ||
380 | (SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
381 | |||
382 | //! Computes the value to write to SPR_SIM_CONTROL to disable chip statistics. | ||
383 | //! | ||
384 | #define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \ | ||
385 | (SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
386 | |||
387 | //! Computes the value to write to SPR_SIM_CONTROL to enable chip statistics. | ||
388 | //! | ||
389 | #define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \ | ||
390 | (SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
391 | |||
392 | |||
393 | |||
394 | // Shim bitrate controls. | ||
395 | |||
396 | //! The number of bits used to store the shim id. | ||
397 | //! | ||
398 | #define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3 | ||
399 | |||
400 | //! @addtogroup arch_sim | ||
401 | //! @{ | ||
402 | |||
403 | //! Change the gbe 0 bitrate. | ||
404 | //! | ||
405 | #define SIM_CONTROL_SHAPING_GBE_0 0x0 | ||
406 | |||
407 | //! Change the gbe 1 bitrate. | ||
408 | //! | ||
409 | #define SIM_CONTROL_SHAPING_GBE_1 0x1 | ||
410 | |||
411 | //! Change the gbe 2 bitrate. | ||
412 | //! | ||
413 | #define SIM_CONTROL_SHAPING_GBE_2 0x2 | ||
414 | |||
415 | //! Change the gbe 3 bitrate. | ||
416 | //! | ||
417 | #define SIM_CONTROL_SHAPING_GBE_3 0x3 | ||
418 | |||
419 | //! Change the xgbe 0 bitrate. | ||
420 | //! | ||
421 | #define SIM_CONTROL_SHAPING_XGBE_0 0x4 | ||
422 | |||
423 | //! Change the xgbe 1 bitrate. | ||
424 | //! | ||
425 | #define SIM_CONTROL_SHAPING_XGBE_1 0x5 | ||
426 | |||
427 | //! The number of bits used to store the type of shaping to do. | ||
428 | //! | ||
429 | #define SIM_CONTROL_SHAPING_TYPE_BITS 2 | ||
430 | |||
431 | //! Control the multiplier. | ||
432 | //! | ||
433 | #define SIM_CONTROL_SHAPING_MULTIPLIER 0 | ||
434 | |||
435 | //! Control the PPS. | ||
436 | //! | ||
437 | #define SIM_CONTROL_SHAPING_PPS 1 | ||
438 | |||
439 | //! Control the BPS. | ||
440 | //! | ||
441 | #define SIM_CONTROL_SHAPING_BPS 2 | ||
442 | |||
443 | //! The number of bits for the units of the shaping parameter. | ||
444 | //! | ||
445 | #define SIM_CONTROL_SHAPING_UNITS_BITS 2 | ||
446 | |||
447 | //! Provide a number in single units. | ||
448 | //! | ||
449 | #define SIM_CONTROL_SHAPING_UNITS_SINGLE 0 | ||
450 | |||
451 | //! Provide a number in kilo units. | ||
452 | //! | ||
453 | #define SIM_CONTROL_SHAPING_UNITS_KILO 1 | ||
454 | |||
455 | //! Provide a number in mega units. | ||
456 | //! | ||
457 | #define SIM_CONTROL_SHAPING_UNITS_MEGA 2 | ||
458 | |||
459 | //! Provide a number in giga units. | ||
460 | //! | ||
461 | #define SIM_CONTROL_SHAPING_UNITS_GIGA 3 | ||
462 | |||
463 | //! @} | ||
464 | |||
465 | //! How many bits are available for the rate. | ||
466 | //! | ||
467 | #define SIM_CONTROL_SHAPING_RATE_BITS \ | ||
468 | (32 - (_SIM_CONTROL_OPERATOR_BITS + \ | ||
469 | SIM_CONTROL_SHAPING_SHIM_ID_BITS + \ | ||
470 | SIM_CONTROL_SHAPING_TYPE_BITS + \ | ||
471 | SIM_CONTROL_SHAPING_UNITS_BITS)) | ||
472 | |||
473 | //! Computes the value to write to SPR_SIM_CONTROL to change a bitrate. | ||
474 | //! | ||
475 | #define SIM_SHAPING_SPR_ARG(shim, type, units, rate) \ | ||
476 | (SIM_CONTROL_SHAPING | \ | ||
477 | ((shim) | \ | ||
478 | ((type) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS)) | \ | ||
479 | ((units) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS + \ | ||
480 | SIM_CONTROL_SHAPING_TYPE_BITS)) | \ | ||
481 | ((rate) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS + \ | ||
482 | SIM_CONTROL_SHAPING_TYPE_BITS + \ | ||
483 | SIM_CONTROL_SHAPING_UNITS_BITS))) << _SIM_CONTROL_OPERATOR_BITS) | ||
484 | |||
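For example (illustrative sketch, not part of this patch), shaping the first gbe shim to a rate of 100 in mega units with the BPS type could be written as follows, assuming __insn_mtspr() and SPR_SIM_CONTROL:

/* Sketch: shape gbe 0 to a rate of 100 mega-BPS. */
static inline void sim_shape_gbe0(void)
{
	__insn_mtspr(SPR_SIM_CONTROL,
		     SIM_SHAPING_SPR_ARG(SIM_CONTROL_SHAPING_GBE_0,
					 SIM_CONTROL_SHAPING_BPS,
					 SIM_CONTROL_SHAPING_UNITS_MEGA,
					 100));
}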
485 | |||
486 | //== Values returned when reading SPR_SIM_CONTROL. | ||
487 | // ISSUE: These names should share a longer common prefix. | ||
488 | |||
489 | //! When reading SPR_SIM_CONTROL, the mask of simulator tracing bits | ||
490 | //! (SIM_TRACE_xxx values). | ||
491 | //! | ||
492 | #define SIM_TRACE_FLAG_MASK 0xFFFF | ||
493 | |||
494 | //! When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled. | ||
495 | //! | ||
496 | #define SIM_PROFILER_ENABLED_MASK 0x10000 | ||
497 | |||
498 | |||
499 | //== Special arguments for "SIM_CONTROL_PUTC". | ||
500 | |||
501 | //! Flag value for forcing a PUTC string-flush, including | ||
502 | //! coordinate/cycle prefix and newline. | ||
503 | //! | ||
504 | #define SIM_PUTC_FLUSH_STRING 0x100 | ||
505 | |||
506 | //! Flag value for forcing a PUTC binary-data-flush, which skips the | ||
507 | //! prefix and does not append a newline. | ||
508 | //! | ||
509 | #define SIM_PUTC_FLUSH_BINARY 0x101 | ||
510 | |||
511 | |||
512 | #endif //__ARCH_SIM_DEF_H__ | ||
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h new file mode 100644 index 000000000000..c8fdbd9a45e6 --- /dev/null +++ b/arch/tile/include/arch/spr_def.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifdef __tilegx__ | ||
16 | #include <arch/spr_def_64.h> | ||
17 | #else | ||
18 | #include <arch/spr_def_32.h> | ||
19 | #endif | ||
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h new file mode 100644 index 000000000000..b4fc06864df6 --- /dev/null +++ b/arch/tile/include/arch/spr_def_32.h | |||
@@ -0,0 +1,162 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef __DOXYGEN__ | ||
16 | |||
17 | #ifndef __ARCH_SPR_DEF_H__ | ||
18 | #define __ARCH_SPR_DEF_H__ | ||
19 | |||
20 | #define SPR_AUX_PERF_COUNT_0 0x6005 | ||
21 | #define SPR_AUX_PERF_COUNT_1 0x6006 | ||
22 | #define SPR_AUX_PERF_COUNT_CTL 0x6007 | ||
23 | #define SPR_AUX_PERF_COUNT_STS 0x6008 | ||
24 | #define SPR_CYCLE_HIGH 0x4e06 | ||
25 | #define SPR_CYCLE_LOW 0x4e07 | ||
26 | #define SPR_DMA_BYTE 0x3900 | ||
27 | #define SPR_DMA_CHUNK_SIZE 0x3901 | ||
28 | #define SPR_DMA_CTR 0x3902 | ||
29 | #define SPR_DMA_CTR__REQUEST_MASK 0x1 | ||
30 | #define SPR_DMA_CTR__SUSPEND_MASK 0x2 | ||
31 | #define SPR_DMA_DST_ADDR 0x3903 | ||
32 | #define SPR_DMA_DST_CHUNK_ADDR 0x3904 | ||
33 | #define SPR_DMA_SRC_ADDR 0x3905 | ||
34 | #define SPR_DMA_SRC_CHUNK_ADDR 0x3906 | ||
35 | #define SPR_DMA_STATUS__DONE_MASK 0x1 | ||
36 | #define SPR_DMA_STATUS__BUSY_MASK 0x2 | ||
37 | #define SPR_DMA_STATUS__RUNNING_MASK 0x10 | ||
38 | #define SPR_DMA_STRIDE 0x3907 | ||
39 | #define SPR_DMA_USER_STATUS 0x3908 | ||
40 | #define SPR_DONE 0x4e08 | ||
41 | #define SPR_EVENT_BEGIN 0x4e0d | ||
42 | #define SPR_EVENT_END 0x4e0e | ||
43 | #define SPR_EX_CONTEXT_0_0 0x4a05 | ||
44 | #define SPR_EX_CONTEXT_0_1 0x4a06 | ||
45 | #define SPR_EX_CONTEXT_0_1__PL_SHIFT 0 | ||
46 | #define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3 | ||
47 | #define SPR_EX_CONTEXT_0_1__PL_MASK 0x3 | ||
48 | #define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2 | ||
49 | #define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1 | ||
50 | #define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4 | ||
51 | #define SPR_EX_CONTEXT_1_0 0x4805 | ||
52 | #define SPR_EX_CONTEXT_1_1 0x4806 | ||
53 | #define SPR_EX_CONTEXT_1_1__PL_SHIFT 0 | ||
54 | #define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3 | ||
55 | #define SPR_EX_CONTEXT_1_1__PL_MASK 0x3 | ||
56 | #define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2 | ||
57 | #define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1 | ||
58 | #define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4 | ||
59 | #define SPR_FAIL 0x4e09 | ||
60 | #define SPR_INTCTRL_0_STATUS 0x4a07 | ||
61 | #define SPR_INTCTRL_1_STATUS 0x4807 | ||
62 | #define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a | ||
63 | #define SPR_INTERRUPT_MASK_0_0 0x4a08 | ||
64 | #define SPR_INTERRUPT_MASK_0_1 0x4a09 | ||
65 | #define SPR_INTERRUPT_MASK_1_0 0x4809 | ||
66 | #define SPR_INTERRUPT_MASK_1_1 0x480a | ||
67 | #define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a | ||
68 | #define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b | ||
69 | #define SPR_INTERRUPT_MASK_RESET_1_0 0x480b | ||
70 | #define SPR_INTERRUPT_MASK_RESET_1_1 0x480c | ||
71 | #define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c | ||
72 | #define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d | ||
73 | #define SPR_INTERRUPT_MASK_SET_1_0 0x480d | ||
74 | #define SPR_INTERRUPT_MASK_SET_1_1 0x480e | ||
75 | #define SPR_MPL_DMA_CPL_SET_0 0x5800 | ||
76 | #define SPR_MPL_DMA_CPL_SET_1 0x5801 | ||
77 | #define SPR_MPL_DMA_NOTIFY_SET_0 0x3800 | ||
78 | #define SPR_MPL_DMA_NOTIFY_SET_1 0x3801 | ||
79 | #define SPR_MPL_INTCTRL_0_SET_0 0x4a00 | ||
80 | #define SPR_MPL_INTCTRL_0_SET_1 0x4a01 | ||
81 | #define SPR_MPL_INTCTRL_1_SET_0 0x4800 | ||
82 | #define SPR_MPL_INTCTRL_1_SET_1 0x4801 | ||
83 | #define SPR_MPL_SN_ACCESS_SET_0 0x0800 | ||
84 | #define SPR_MPL_SN_ACCESS_SET_1 0x0801 | ||
85 | #define SPR_MPL_SN_CPL_SET_0 0x5a00 | ||
86 | #define SPR_MPL_SN_CPL_SET_1 0x5a01 | ||
87 | #define SPR_MPL_SN_FIREWALL_SET_0 0x2c00 | ||
88 | #define SPR_MPL_SN_FIREWALL_SET_1 0x2c01 | ||
89 | #define SPR_MPL_SN_NOTIFY_SET_0 0x2a00 | ||
90 | #define SPR_MPL_SN_NOTIFY_SET_1 0x2a01 | ||
91 | #define SPR_MPL_UDN_ACCESS_SET_0 0x0c00 | ||
92 | #define SPR_MPL_UDN_ACCESS_SET_1 0x0c01 | ||
93 | #define SPR_MPL_UDN_AVAIL_SET_0 0x4000 | ||
94 | #define SPR_MPL_UDN_AVAIL_SET_1 0x4001 | ||
95 | #define SPR_MPL_UDN_CA_SET_0 0x3c00 | ||
96 | #define SPR_MPL_UDN_CA_SET_1 0x3c01 | ||
97 | #define SPR_MPL_UDN_COMPLETE_SET_0 0x1400 | ||
98 | #define SPR_MPL_UDN_COMPLETE_SET_1 0x1401 | ||
99 | #define SPR_MPL_UDN_FIREWALL_SET_0 0x3000 | ||
100 | #define SPR_MPL_UDN_FIREWALL_SET_1 0x3001 | ||
101 | #define SPR_MPL_UDN_REFILL_SET_0 0x1000 | ||
102 | #define SPR_MPL_UDN_REFILL_SET_1 0x1001 | ||
103 | #define SPR_MPL_UDN_TIMER_SET_0 0x3600 | ||
104 | #define SPR_MPL_UDN_TIMER_SET_1 0x3601 | ||
105 | #define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00 | ||
106 | #define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01 | ||
107 | #define SPR_PASS 0x4e0b | ||
108 | #define SPR_PERF_COUNT_0 0x4205 | ||
109 | #define SPR_PERF_COUNT_1 0x4206 | ||
110 | #define SPR_PERF_COUNT_CTL 0x4207 | ||
111 | #define SPR_PERF_COUNT_STS 0x4208 | ||
112 | #define SPR_PROC_STATUS 0x4f00 | ||
113 | #define SPR_SIM_CONTROL 0x4e0c | ||
114 | #define SPR_SNCTL 0x0805 | ||
115 | #define SPR_SNCTL__FRZFABRIC_MASK 0x1 | ||
116 | #define SPR_SNCTL__FRZPROC_MASK 0x2 | ||
117 | #define SPR_SNPC 0x080b | ||
118 | #define SPR_SNSTATIC 0x080c | ||
119 | #define SPR_SYSTEM_SAVE_0_0 0x4b00 | ||
120 | #define SPR_SYSTEM_SAVE_0_1 0x4b01 | ||
121 | #define SPR_SYSTEM_SAVE_0_2 0x4b02 | ||
122 | #define SPR_SYSTEM_SAVE_0_3 0x4b03 | ||
123 | #define SPR_SYSTEM_SAVE_1_0 0x4900 | ||
124 | #define SPR_SYSTEM_SAVE_1_1 0x4901 | ||
125 | #define SPR_SYSTEM_SAVE_1_2 0x4902 | ||
126 | #define SPR_SYSTEM_SAVE_1_3 0x4903 | ||
127 | #define SPR_TILE_COORD 0x4c17 | ||
128 | #define SPR_TILE_RTF_HWM 0x4e10 | ||
129 | #define SPR_TILE_TIMER_CONTROL 0x3205 | ||
130 | #define SPR_TILE_WRITE_PENDING 0x4e0f | ||
131 | #define SPR_UDN_AVAIL_EN 0x4005 | ||
132 | #define SPR_UDN_CA_DATA 0x0d00 | ||
133 | #define SPR_UDN_DATA_AVAIL 0x0d03 | ||
134 | #define SPR_UDN_DEADLOCK_TIMEOUT 0x3606 | ||
135 | #define SPR_UDN_DEMUX_CA_COUNT 0x0c05 | ||
136 | #define SPR_UDN_DEMUX_COUNT_0 0x0c06 | ||
137 | #define SPR_UDN_DEMUX_COUNT_1 0x0c07 | ||
138 | #define SPR_UDN_DEMUX_COUNT_2 0x0c08 | ||
139 | #define SPR_UDN_DEMUX_COUNT_3 0x0c09 | ||
140 | #define SPR_UDN_DEMUX_CTL 0x0c0a | ||
141 | #define SPR_UDN_DEMUX_QUEUE_SEL 0x0c0c | ||
142 | #define SPR_UDN_DEMUX_STATUS 0x0c0d | ||
143 | #define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e | ||
144 | #define SPR_UDN_DIRECTION_PROTECT 0x3005 | ||
145 | #define SPR_UDN_REFILL_EN 0x1005 | ||
146 | #define SPR_UDN_SP_FIFO_DATA 0x0c11 | ||
147 | #define SPR_UDN_SP_FIFO_SEL 0x0c12 | ||
148 | #define SPR_UDN_SP_FREEZE 0x0c13 | ||
149 | #define SPR_UDN_SP_FREEZE__SP_FRZ_MASK 0x1 | ||
150 | #define SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK 0x2 | ||
151 | #define SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK 0x4 | ||
152 | #define SPR_UDN_SP_STATE 0x0c14 | ||
153 | #define SPR_UDN_TAG_0 0x0c15 | ||
154 | #define SPR_UDN_TAG_1 0x0c16 | ||
155 | #define SPR_UDN_TAG_2 0x0c17 | ||
156 | #define SPR_UDN_TAG_3 0x0c18 | ||
157 | #define SPR_UDN_TAG_VALID 0x0c19 | ||
158 | #define SPR_UDN_TILE_COORD 0x0c1a | ||
159 | |||
160 | #endif /* !defined(__ARCH_SPR_DEF_H__) */ | ||
161 | |||
162 | #endif /* !defined(__DOXYGEN__) */ | ||
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild new file mode 100644 index 000000000000..3b8f55b82dee --- /dev/null +++ b/arch/tile/include/asm/Kbuild | |||
@@ -0,0 +1,3 @@ | |||
1 | include include/asm-generic/Kbuild.asm | ||
2 | |||
3 | header-y += ucontext.h | ||
diff --git a/arch/tile/include/asm/asm-offsets.h b/arch/tile/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/tile/include/asm/asm-offsets.h | |||
@@ -0,0 +1 @@ | |||
#include <generated/asm-offsets.h> | |||
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h new file mode 100644 index 000000000000..b8c49f98a44c --- /dev/null +++ b/arch/tile/include/asm/atomic.h | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Atomic primitives. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_ATOMIC_H | ||
18 | #define _ASM_TILE_ATOMIC_H | ||
19 | |||
20 | #ifndef __ASSEMBLY__ | ||
21 | |||
22 | #include <linux/compiler.h> | ||
23 | #include <asm/system.h> | ||
24 | |||
25 | #define ATOMIC_INIT(i) { (i) } | ||
26 | |||
27 | /** | ||
28 | * atomic_read - read atomic variable | ||
29 | * @v: pointer of type atomic_t | ||
30 | * | ||
31 | * Atomically reads the value of @v. | ||
32 | */ | ||
33 | static inline int atomic_read(const atomic_t *v) | ||
34 | { | ||
35 | return v->counter; | ||
36 | } | ||
37 | |||
38 | /** | ||
39 | * atomic_sub_return - subtract integer and return | ||
40 | * @v: pointer of type atomic_t | ||
41 | * @i: integer value to subtract | ||
42 | * | ||
43 | * Atomically subtracts @i from @v and returns @v - @i | ||
44 | */ | ||
45 | #define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v)) | ||
46 | |||
47 | /** | ||
48 | * atomic_sub - subtract integer from atomic variable | ||
49 | * @i: integer value to subtract | ||
50 | * @v: pointer of type atomic_t | ||
51 | * | ||
52 | * Atomically subtracts @i from @v. | ||
53 | */ | ||
54 | #define atomic_sub(i, v) atomic_add((int)(-(i)), (v)) | ||
55 | |||
56 | /** | ||
57 | * atomic_sub_and_test - subtract value from variable and test result | ||
58 | * @i: integer value to subtract | ||
59 | * @v: pointer of type atomic_t | ||
60 | * | ||
61 | * Atomically subtracts @i from @v and returns true if the result is | ||
62 | * zero, or false for all other cases. | ||
63 | */ | ||
64 | #define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) | ||
65 | |||
66 | /** | ||
67 | * atomic_inc_return - increment memory and return | ||
68 | * @v: pointer of type atomic_t | ||
69 | * | ||
70 | * Atomically increments @v by 1 and returns the new value. | ||
71 | */ | ||
72 | #define atomic_inc_return(v) atomic_add_return(1, (v)) | ||
73 | |||
74 | /** | ||
75 | * atomic_dec_return - decrement memory and return | ||
76 | * @v: pointer of type atomic_t | ||
77 | * | ||
78 | * Atomically decrements @v by 1 and returns the new value. | ||
79 | */ | ||
80 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) | ||
81 | |||
82 | /** | ||
83 | * atomic_inc - increment atomic variable | ||
84 | * @v: pointer of type atomic_t | ||
85 | * | ||
86 | * Atomically increments @v by 1. | ||
87 | */ | ||
88 | #define atomic_inc(v) atomic_add(1, (v)) | ||
89 | |||
90 | /** | ||
91 | * atomic_dec - decrement atomic variable | ||
92 | * @v: pointer of type atomic_t | ||
93 | * | ||
94 | * Atomically decrements @v by 1. | ||
95 | */ | ||
96 | #define atomic_dec(v) atomic_sub(1, (v)) | ||
97 | |||
98 | /** | ||
99 | * atomic_dec_and_test - decrement and test | ||
100 | * @v: pointer of type atomic_t | ||
101 | * | ||
102 | * Atomically decrements @v by 1 and returns true if the result is 0. | ||
103 | */ | ||
104 | #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) | ||
105 | |||
106 | /** | ||
107 | * atomic_inc_and_test - increment and test | ||
108 | * @v: pointer of type atomic_t | ||
109 | * | ||
110 | * Atomically increments @v by 1 and returns true if the result is 0. | ||
111 | */ | ||
112 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) | ||
113 | |||
114 | /** | ||
115 | * atomic_add_negative - add and test if negative | ||
116 | * @v: pointer of type atomic_t | ||
117 | * @i: integer value to add | ||
118 | * | ||
119 | * Atomically adds @i to @v and returns true if the result is | ||
120 | * negative, or false when result is greater than or equal to zero. | ||
121 | */ | ||
122 | #define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0) | ||
123 | |||
124 | /** | ||
125 | * atomic_inc_not_zero - increment unless the number is zero | ||
126 | * @v: pointer of type atomic_t | ||
127 | * | ||
128 | * Atomically increments @v by 1, so long as @v is non-zero. | ||
129 | * Returns non-zero if @v was non-zero, and zero otherwise. | ||
130 | */ | ||
131 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
132 | |||
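These wrappers compose in the usual reference-counting style; a minimal sketch with hypothetical names (not part of this patch):

/* Sketch: a simple reference count built on the primitives above. */
struct example_obj {
	atomic_t refcount;
};

extern void example_obj_free(struct example_obj *obj);	/* hypothetical destructor */

static inline int example_obj_get(struct example_obj *obj)
{
	/* Fails (returns 0) if the count has already dropped to zero. */
	return atomic_inc_not_zero(&obj->refcount);
}

static inline void example_obj_put(struct example_obj *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		example_obj_free(obj);
}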
133 | |||
134 | /* | ||
135 | * We define xchg() and cmpxchg() in the included headers. | ||
136 | * Note that we do not define __HAVE_ARCH_CMPXCHG, since that would imply | ||
137 | * that cmpxchg() is an efficient operation, which is not particularly true. | ||
138 | */ | ||
139 | |||
140 | /* Nonexistent functions intended to cause link errors. */ | ||
141 | extern unsigned long __xchg_called_with_bad_pointer(void); | ||
142 | extern unsigned long __cmpxchg_called_with_bad_pointer(void); | ||
143 | |||
144 | #define tas(ptr) (xchg((ptr), 1)) | ||
145 | |||
146 | #endif /* __ASSEMBLY__ */ | ||
147 | |||
148 | #ifndef __tilegx__ | ||
149 | #include <asm/atomic_32.h> | ||
150 | #else | ||
151 | #include <asm/atomic_64.h> | ||
152 | #endif | ||
153 | |||
154 | /* Provide the appropriate atomic_long_t definitions. */ | ||
155 | #ifndef __ASSEMBLY__ | ||
156 | #include <asm-generic/atomic-long.h> | ||
157 | #endif | ||
158 | |||
159 | #endif /* _ASM_TILE_ATOMIC_H */ | ||
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h new file mode 100644 index 000000000000..e4f8b4f04895 --- /dev/null +++ b/arch/tile/include/asm/atomic_32.h | |||
@@ -0,0 +1,353 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Do not include directly; use <asm/atomic.h>. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_ATOMIC_32_H | ||
18 | #define _ASM_TILE_ATOMIC_32_H | ||
19 | |||
20 | #include <arch/chip.h> | ||
21 | |||
22 | #ifndef __ASSEMBLY__ | ||
23 | |||
24 | /* Tile-specific routines to support <asm/atomic.h>. */ | ||
25 | int _atomic_xchg(atomic_t *v, int n); | ||
26 | int _atomic_xchg_add(atomic_t *v, int i); | ||
27 | int _atomic_xchg_add_unless(atomic_t *v, int a, int u); | ||
28 | int _atomic_cmpxchg(atomic_t *v, int o, int n); | ||
29 | |||
30 | /** | ||
31 | * atomic_xchg - atomically exchange contents of memory with a new value | ||
32 | * @v: pointer of type atomic_t | ||
33 | * @i: integer value to store in memory | ||
34 | * | ||
35 | * Atomically sets @v to @i and returns old @v | ||
36 | */ | ||
37 | static inline int atomic_xchg(atomic_t *v, int n) | ||
38 | { | ||
39 | smp_mb(); /* barrier for proper semantics */ | ||
40 | return _atomic_xchg(v, n); | ||
41 | } | ||
42 | |||
43 | /** | ||
44 | * atomic_cmpxchg - atomically exchange contents of memory if it matches | ||
45 | * @v: pointer of type atomic_t | ||
46 | * @o: old value that memory should have | ||
47 | * @n: new value to write to memory if it matches | ||
48 | * | ||
49 | * Atomically checks if @v holds @o and replaces it with @n if so. | ||
50 | * Returns the old value at @v. | ||
51 | */ | ||
52 | static inline int atomic_cmpxchg(atomic_t *v, int o, int n) | ||
53 | { | ||
54 | smp_mb(); /* barrier for proper semantics */ | ||
55 | return _atomic_cmpxchg(v, o, n); | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * atomic_add - add integer to atomic variable | ||
60 | * @i: integer value to add | ||
61 | * @v: pointer of type atomic_t | ||
62 | * | ||
63 | * Atomically adds @i to @v. | ||
64 | */ | ||
65 | static inline void atomic_add(int i, atomic_t *v) | ||
66 | { | ||
67 | _atomic_xchg_add(v, i); | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * atomic_add_return - add integer and return | ||
72 | * @v: pointer of type atomic_t | ||
73 | * @i: integer value to add | ||
74 | * | ||
75 | * Atomically adds @i to @v and returns @i + @v | ||
76 | */ | ||
77 | static inline int atomic_add_return(int i, atomic_t *v) | ||
78 | { | ||
79 | smp_mb(); /* barrier for proper semantics */ | ||
80 | return _atomic_xchg_add(v, i) + i; | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * atomic_add_unless - add unless the number is already a given value | ||
85 | * @v: pointer of type atomic_t | ||
86 | * @a: the amount to add to v... | ||
87 | * @u: ...unless v is equal to u. | ||
88 | * | ||
89 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
90 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
91 | */ | ||
92 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
93 | { | ||
94 | smp_mb(); /* barrier for proper semantics */ | ||
95 | return _atomic_xchg_add_unless(v, a, u) != u; | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * atomic_set - set atomic variable | ||
100 | * @v: pointer of type atomic_t | ||
101 | * @i: required value | ||
102 | * | ||
103 | * Atomically sets the value of @v to @i. | ||
104 | * | ||
105 | * atomic_set() can't be just a raw store, since it would be lost if it | ||
106 | * fell between the load and store of one of the other atomic ops. | ||
107 | */ | ||
108 | static inline void atomic_set(atomic_t *v, int n) | ||
109 | { | ||
110 | _atomic_xchg(v, n); | ||
111 | } | ||
112 | |||
113 | #define xchg(ptr, x) ((typeof(*(ptr))) \ | ||
114 | ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \ | ||
115 | atomic_xchg((atomic_t *)(ptr), (long)(x)) : \ | ||
116 | __xchg_called_with_bad_pointer())) | ||
117 | |||
118 | #define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \ | ||
119 | ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \ | ||
120 | atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \ | ||
121 | __cmpxchg_called_with_bad_pointer())) | ||
122 | |||
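A hedged sketch of typical cmpxchg() use on a plain 32-bit word (illustrative names only, not part of this patch):

/* Sketch: lock-free saturating increment of an int via cmpxchg(). */
static inline void example_saturating_inc(int *counter, int max)
{
	int old, val;

	do {
		old = *counter;
		if (old >= max)
			return;
		val = old + 1;
	} while (cmpxchg(counter, old, val) != old);
}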
123 | /* A 64-bit atomic type. */ | ||
124 | |||
125 | typedef struct { | ||
126 | u64 __aligned(8) counter; | ||
127 | } atomic64_t; | ||
128 | |||
129 | #define ATOMIC64_INIT(val) { (val) } | ||
130 | |||
131 | u64 _atomic64_xchg(atomic64_t *v, u64 n); | ||
132 | u64 _atomic64_xchg_add(atomic64_t *v, u64 i); | ||
133 | u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u); | ||
134 | u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n); | ||
135 | |||
136 | /** | ||
137 | * atomic64_read - read atomic variable | ||
138 | * @v: pointer of type atomic64_t | ||
139 | * | ||
140 | * Atomically reads the value of @v. | ||
141 | */ | ||
142 | static inline u64 atomic64_read(const atomic64_t *v) | ||
143 | { | ||
144 | /* | ||
145 | * Requires an atomic op to read both 32-bit parts consistently. | ||
146 | * Casting away const is safe since the atomic support routines | ||
147 | * do not write to memory if the value has not been modified. | ||
148 | */ | ||
149 | return _atomic64_xchg_add((atomic64_t *)v, 0); | ||
150 | } | ||
151 | |||
152 | /** | ||
153 | * atomic64_xchg - atomically exchange contents of memory with a new value | ||
154 | * @v: pointer of type atomic64_t | ||
155 | * @i: integer value to store in memory | ||
156 | * | ||
157 | * Atomically sets @v to @i and returns old @v | ||
158 | */ | ||
159 | static inline u64 atomic64_xchg(atomic64_t *v, u64 n) | ||
160 | { | ||
161 | smp_mb(); /* barrier for proper semantics */ | ||
162 | return _atomic64_xchg(v, n); | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * atomic64_cmpxchg - atomically exchange contents of memory if it matches | ||
167 | * @v: pointer of type atomic64_t | ||
168 | * @o: old value that memory should have | ||
169 | * @n: new value to write to memory if it matches | ||
170 | * | ||
171 | * Atomically checks if @v holds @o and replaces it with @n if so. | ||
172 | * Returns the old value at @v. | ||
173 | */ | ||
174 | static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) | ||
175 | { | ||
176 | smp_mb(); /* barrier for proper semantics */ | ||
177 | return _atomic64_cmpxchg(v, o, n); | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * atomic64_add - add integer to atomic variable | ||
182 | * @i: integer value to add | ||
183 | * @v: pointer of type atomic64_t | ||
184 | * | ||
185 | * Atomically adds @i to @v. | ||
186 | */ | ||
187 | static inline void atomic64_add(u64 i, atomic64_t *v) | ||
188 | { | ||
189 | _atomic64_xchg_add(v, i); | ||
190 | } | ||
191 | |||
192 | /** | ||
193 | * atomic64_add_return - add integer and return | ||
194 | * @v: pointer of type atomic64_t | ||
195 | * @i: integer value to add | ||
196 | * | ||
197 | * Atomically adds @i to @v and returns @i + @v | ||
198 | */ | ||
199 | static inline u64 atomic64_add_return(u64 i, atomic64_t *v) | ||
200 | { | ||
201 | smp_mb(); /* barrier for proper semantics */ | ||
202 | return _atomic64_xchg_add(v, i) + i; | ||
203 | } | ||
204 | |||
205 | /** | ||
206 | * atomic64_add_unless - add unless the number is already a given value | ||
207 | * @v: pointer of type atomic64_t | ||
208 | * @a: the amount to add to v... | ||
209 | * @u: ...unless v is equal to u. | ||
210 | * | ||
211 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
212 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
213 | */ | ||
214 | static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) | ||
215 | { | ||
216 | smp_mb(); /* barrier for proper semantics */ | ||
217 | return _atomic64_xchg_add_unless(v, a, u) != u; | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * atomic64_set - set atomic variable | ||
222 | * @v: pointer of type atomic64_t | ||
223 | * @i: required value | ||
224 | * | ||
225 | * Atomically sets the value of @v to @i. | ||
226 | * | ||
227 | * atomic64_set() can't be just a raw store, since it would be lost if it | ||
228 | * fell between the load and store of one of the other atomic ops. | ||
229 | */ | ||
230 | static inline void atomic64_set(atomic64_t *v, u64 n) | ||
231 | { | ||
232 | _atomic64_xchg(v, n); | ||
233 | } | ||
234 | |||
235 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) | ||
236 | #define atomic64_inc(v) atomic64_add(1LL, (v)) | ||
237 | #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) | ||
238 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) | ||
239 | #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v)) | ||
240 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) | ||
241 | #define atomic64_sub(i, v) atomic64_add(-(i), (v)) | ||
242 | #define atomic64_dec(v) atomic64_sub(1LL, (v)) | ||
243 | #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) | ||
244 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | ||
245 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) | ||
246 | |||
247 | /* | ||
248 | * We need to barrier before modifying the word, since the _atomic_xxx() | ||
249 | * routines just tns the lock and then do a read/modify/write of the word. | ||
250 | * But after the word is updated, the routine issues an "mf" before returning, | ||
251 | * and since it's a function call, we don't even need a compiler barrier. | ||
252 | */ | ||
253 | #define smp_mb__before_atomic_dec() smp_mb() | ||
254 | #define smp_mb__before_atomic_inc() smp_mb() | ||
255 | #define smp_mb__after_atomic_dec() do { } while (0) | ||
256 | #define smp_mb__after_atomic_inc() do { } while (0) | ||
257 | |||
258 | |||
259 | /* | ||
260 | * Support "tns" atomic integers. These are atomic integers that can | ||
261 | * hold any value but "1". They are more efficient than regular atomic | ||
262 | * operations because the "lock" (aka acquire) step is a single "tns" | ||
263 | * in the uncontended case, and the "unlock" (aka release) step is a | ||
264 | * single "store" without an mf. (However, note that on tilepro the | ||
265 | * "tns" will evict the local cache line, so it's not all upside.) | ||
266 | * | ||
267 | * Note that you can ONLY observe the value stored in the pointer | ||
268 | * using these operations; a direct read of the value may confusingly | ||
269 | * return the special value "1". | ||
270 | */ | ||
271 | |||
272 | int __tns_atomic_acquire(atomic_t *); | ||
273 | void __tns_atomic_release(atomic_t *p, int v); | ||
274 | |||
275 | static inline void tns_atomic_set(atomic_t *v, int i) | ||
276 | { | ||
277 | __tns_atomic_acquire(v); | ||
278 | __tns_atomic_release(v, i); | ||
279 | } | ||
280 | |||
281 | static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n) | ||
282 | { | ||
283 | int ret = __tns_atomic_acquire(v); | ||
284 | __tns_atomic_release(v, (ret == o) ? n : ret); | ||
285 | return ret; | ||
286 | } | ||
287 | |||
288 | static inline int tns_atomic_xchg(atomic_t *v, int n) | ||
289 | { | ||
290 | int ret = __tns_atomic_acquire(v); | ||
291 | __tns_atomic_release(v, n); | ||
292 | return ret; | ||
293 | } | ||
294 | |||
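A brief hedged sketch of the read caveat above (illustrative helper, not part of this patch): since a raw load may confusingly observe the transient value 1, even a pure read should go through the accessors.

/* Sketch: read a tns-protected word that never legitimately holds 1.
 * The compare against 1 can never succeed, so the stored value is
 * released unchanged and returned.
 */
static inline int example_tns_read(atomic_t *v)
{
	return tns_atomic_cmpxchg(v, 1, 1);
}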
295 | #endif /* !__ASSEMBLY__ */ | ||
296 | |||
297 | /* | ||
298 | * Internal definitions only beyond this point. | ||
299 | */ | ||
300 | |||
301 | #define ATOMIC_LOCKS_FOUND_VIA_TABLE() \ | ||
302 | (!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP)) | ||
303 | |||
304 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
305 | |||
306 | /* Number of entries in atomic_lock_ptr[]. */ | ||
307 | #define ATOMIC_HASH_L1_SHIFT 6 | ||
308 | #define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT) | ||
309 | |||
310 | /* Number of locks in each struct pointed to by atomic_lock_ptr[]. */ | ||
311 | #define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2) | ||
312 | #define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT) | ||
313 | |||
314 | #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
315 | |||
316 | /* | ||
317 | * Number of atomic locks in atomic_locks[]. Must be a power of two. | ||
318 | * There is no reason for more than PAGE_SIZE / 8 entries, since that | ||
319 | * is the maximum number of pointer bits we can use to index this. | ||
320 | * And we cannot have more than PAGE_SIZE / 4, since this has to | ||
321 | * fit on a single page and each entry takes 4 bytes. | ||
322 | */ | ||
323 | #define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3) | ||
324 | #define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT) | ||
325 | |||
326 | #ifndef __ASSEMBLY__ | ||
327 | extern int atomic_locks[]; | ||
328 | #endif | ||
329 | |||
330 | #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
331 | |||
332 | /* | ||
333 | * All the code that may fault while holding an atomic lock must | ||
334 | * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code | ||
335 | * can correctly release and reacquire the lock. Note that we | ||
336 | * mention the register number in a comment in "lib/atomic_asm.S" to keep | ||
337 | * assembly coders from using this register by mistake, so if it | ||
338 | * is changed here, change that comment as well. | ||
339 | */ | ||
340 | #define ATOMIC_LOCK_REG 20 | ||
341 | #define ATOMIC_LOCK_REG_NAME r20 | ||
342 | |||
343 | #ifndef __ASSEMBLY__ | ||
344 | /* Called from setup to initialize a hash table to point to per_cpu locks. */ | ||
345 | void __init_atomic_per_cpu(void); | ||
346 | |||
347 | #ifdef CONFIG_SMP | ||
348 | /* Support releasing the atomic lock in do_page_fault_ics(). */ | ||
349 | void __atomic_fault_unlock(int *lock_ptr); | ||
350 | #endif | ||
351 | #endif /* !__ASSEMBLY__ */ | ||
352 | |||
353 | #endif /* _ASM_TILE_ATOMIC_32_H */ | ||
diff --git a/arch/tile/include/asm/auxvec.h b/arch/tile/include/asm/auxvec.h new file mode 100644 index 000000000000..1d393edb0641 --- /dev/null +++ b/arch/tile/include/asm/auxvec.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_AUXVEC_H | ||
16 | #define _ASM_TILE_AUXVEC_H | ||
17 | |||
18 | /* No extensions to auxvec */ | ||
19 | |||
20 | #endif /* _ASM_TILE_AUXVEC_H */ | ||
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h new file mode 100644 index 000000000000..6970bfcad549 --- /dev/null +++ b/arch/tile/include/asm/backtrace.h | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _TILE_BACKTRACE_H | ||
16 | #define _TILE_BACKTRACE_H | ||
17 | |||
18 | |||
19 | |||
20 | #include <linux/types.h> | ||
21 | |||
22 | #include <arch/chip.h> | ||
23 | |||
24 | #if CHIP_VA_WIDTH() > 32 | ||
25 | typedef unsigned long long VirtualAddress; | ||
26 | #else | ||
27 | typedef unsigned int VirtualAddress; | ||
28 | #endif | ||
29 | |||
30 | |||
31 | /** Reads 'size' bytes from 'address' and writes the data to 'result'. | ||
32 | * Returns true if successful, else false (e.g. memory not readable). | ||
33 | */ | ||
34 | typedef bool (*BacktraceMemoryReader)(void *result, | ||
35 | VirtualAddress address, | ||
36 | unsigned int size, | ||
37 | void *extra); | ||
38 | |||
39 | typedef struct { | ||
40 | /** Current PC. */ | ||
41 | VirtualAddress pc; | ||
42 | |||
43 | /** Current stack pointer value. */ | ||
44 | VirtualAddress sp; | ||
45 | |||
46 | /** Current frame pointer value (i.e. caller's stack pointer) */ | ||
47 | VirtualAddress fp; | ||
48 | |||
49 | /** Internal use only: caller's PC for first frame. */ | ||
50 | VirtualAddress initial_frame_caller_pc; | ||
51 | |||
52 | /** Internal use only: callback to read memory. */ | ||
53 | BacktraceMemoryReader read_memory_func; | ||
54 | |||
55 | /** Internal use only: arbitrary argument to read_memory_func. */ | ||
56 | void *read_memory_func_extra; | ||
57 | |||
58 | } BacktraceIterator; | ||
59 | |||
60 | |||
61 | /** Initializes a backtracer to start from the given location. | ||
62 | * | ||
63 | * If the frame pointer cannot be determined it is set to -1. | ||
64 | * | ||
65 | * @param state The state to be filled in. | ||
66 | * @param read_memory_func A callback that reads memory. If NULL, a default | ||
67 | * value is provided. | ||
68 | * @param read_memory_func_extra An arbitrary argument to read_memory_func. | ||
69 | * @param pc The current PC. | ||
70 | * @param lr The current value of the 'lr' register. | ||
71 | * @param sp The current value of the 'sp' register. | ||
72 | * @param r52 The current value of the 'r52' register. | ||
73 | */ | ||
74 | extern void backtrace_init(BacktraceIterator *state, | ||
75 | BacktraceMemoryReader read_memory_func, | ||
76 | void *read_memory_func_extra, | ||
77 | VirtualAddress pc, VirtualAddress lr, | ||
78 | VirtualAddress sp, VirtualAddress r52); | ||
79 | |||
80 | |||
81 | /** Advances the backtracing state to the calling frame, returning | ||
82 | * true iff successful. | ||
83 | */ | ||
84 | extern bool backtrace_next(BacktraceIterator *state); | ||
85 | |||
86 | |||
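A minimal sketch of the intended call pattern (hypothetical helper, not part of this patch; assumes <linux/kernel.h> for pr_info(), and passes NULL to select the default memory reader):

/* Sketch: print every frame reachable from a captured register set. */
static inline void example_print_backtrace(VirtualAddress pc, VirtualAddress lr,
					   VirtualAddress sp, VirtualAddress r52)
{
	BacktraceIterator it;

	backtrace_init(&it, NULL, NULL, pc, lr, sp, r52);
	do {
		pr_info(" frame: pc %#lx sp %#lx fp %#lx\n",
			(unsigned long)it.pc, (unsigned long)it.sp,
			(unsigned long)it.fp);
	} while (backtrace_next(&it));
}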
87 | typedef enum { | ||
88 | |||
89 | /* We have no idea what the caller's pc is. */ | ||
90 | PC_LOC_UNKNOWN, | ||
91 | |||
92 | /* The caller's pc is currently in lr. */ | ||
93 | PC_LOC_IN_LR, | ||
94 | |||
95 | /* The caller's pc can be found by dereferencing the caller's sp. */ | ||
96 | PC_LOC_ON_STACK | ||
97 | |||
98 | } CallerPCLocation; | ||
99 | |||
100 | |||
101 | typedef enum { | ||
102 | |||
103 | /* We have no idea what the caller's sp is. */ | ||
104 | SP_LOC_UNKNOWN, | ||
105 | |||
106 | /* The caller's sp is currently in r52. */ | ||
107 | SP_LOC_IN_R52, | ||
108 | |||
109 | /* The caller's sp can be found by adding a certain constant | ||
110 | * to the current value of sp. | ||
111 | */ | ||
112 | SP_LOC_OFFSET | ||
113 | |||
114 | } CallerSPLocation; | ||
115 | |||
116 | |||
117 | /* Bit values ORed into CALLER_* values for info ops. */ | ||
118 | enum { | ||
119 | /* Setting the low bit on any of these values means the info op | ||
120 | * applies only to one bundle ago. | ||
121 | */ | ||
122 | ONE_BUNDLE_AGO_FLAG = 1, | ||
123 | |||
124 | /* Setting this bit on a CALLER_SP_* value means the PC is in LR. | ||
125 | * If not set, PC is on the stack. | ||
126 | */ | ||
127 | PC_IN_LR_FLAG = 2, | ||
128 | |||
129 | /* This many of the low bits of a CALLER_SP_* value are for the | ||
130 | * flag bits above. | ||
131 | */ | ||
132 | NUM_INFO_OP_FLAGS = 2, | ||
133 | |||
134 | /* We cannot have one in the memory pipe so this is the maximum. */ | ||
135 | MAX_INFO_OPS_PER_BUNDLE = 2 | ||
136 | }; | ||
137 | |||
138 | |||
139 | /** Internal constants used to define 'info' operands. */ | ||
140 | enum { | ||
141 | /* 0 and 1 are reserved, as are all negative numbers. */ | ||
142 | |||
143 | CALLER_UNKNOWN_BASE = 2, | ||
144 | |||
145 | CALLER_SP_IN_R52_BASE = 4, | ||
146 | |||
147 | CALLER_SP_OFFSET_BASE = 8 | ||
148 | }; | ||
149 | |||
150 | |||
151 | /** Current backtracer state describing where it thinks the caller is. */ | ||
152 | typedef struct { | ||
153 | /* | ||
154 | * Public fields | ||
155 | */ | ||
156 | |||
157 | /* How do we find the caller's PC? */ | ||
158 | CallerPCLocation pc_location : 8; | ||
159 | |||
160 | /* How do we find the caller's SP? */ | ||
161 | CallerSPLocation sp_location : 8; | ||
162 | |||
163 | /* If sp_location == SP_LOC_OFFSET, then caller_sp == sp + | ||
164 | * loc->sp_offset. Else this field is undefined. | ||
165 | */ | ||
166 | uint16_t sp_offset; | ||
167 | |||
168 | /* Is the most recently visited bundle a terminating bundle? */ | ||
169 | bool at_terminating_bundle; | ||
170 | |||
171 | /* | ||
172 | * Private fields | ||
173 | */ | ||
174 | |||
175 | /* Will the forward scanner see someone clobbering sp | ||
176 | * (i.e. changing it with something other than "addi sp, sp, N")? | ||
177 | */ | ||
178 | bool sp_clobber_follows; | ||
179 | |||
180 | /* Operand to next "visible" info op (no more than one bundle past | ||
181 | * the next terminating bundle), or -32768 if none. | ||
182 | */ | ||
183 | int16_t next_info_operand; | ||
184 | |||
186 | /* Is the info op referenced by next_info_operand in the very next bundle? */ | ||
186 | bool is_next_info_operand_adjacent; | ||
187 | |||
188 | } CallerLocation; | ||
189 | |||
190 | |||
191 | |||
192 | |||
193 | #endif /* _TILE_BACKTRACE_H */ | ||
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h new file mode 100644 index 000000000000..84600f3514da --- /dev/null +++ b/arch/tile/include/asm/bitops.h | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * Copyright 1992, Linus Torvalds. | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_BITOPS_H | ||
17 | #define _ASM_TILE_BITOPS_H | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | |||
21 | #ifndef _LINUX_BITOPS_H | ||
22 | #error only <linux/bitops.h> can be included directly | ||
23 | #endif | ||
24 | |||
25 | #ifdef __tilegx__ | ||
26 | #include <asm/bitops_64.h> | ||
27 | #else | ||
28 | #include <asm/bitops_32.h> | ||
29 | #endif | ||
30 | |||
31 | /** | ||
32 | * __ffs - find first set bit in word | ||
33 | * @word: The word to search | ||
34 | * | ||
35 | * Undefined if no set bit exists, so code should check against 0 first. | ||
36 | */ | ||
37 | static inline unsigned long __ffs(unsigned long word) | ||
38 | { | ||
39 | return __builtin_ctzl(word); | ||
40 | } | ||
41 | |||
42 | /** | ||
43 | * ffz - find first zero bit in word | ||
44 | * @word: The word to search | ||
45 | * | ||
46 | * Undefined if no zero exists, so code should check against ~0UL first. | ||
47 | */ | ||
48 | static inline unsigned long ffz(unsigned long word) | ||
49 | { | ||
50 | return __builtin_ctzl(~word); | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * __fls - find last set bit in word | ||
55 | * @word: The word to search | ||
56 | * | ||
57 | * Undefined if no set bit exists, so code should check against 0 first. | ||
58 | */ | ||
59 | static inline unsigned long __fls(unsigned long word) | ||
60 | { | ||
61 | return (sizeof(word) * 8) - 1 - __builtin_clzl(word); | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * ffs - find first set bit in word | ||
66 | * @x: the word to search | ||
67 | * | ||
68 | * This is defined the same way as the libc and compiler builtin ffs | ||
69 | * routines, therefore differs in spirit from the other bitops. | ||
70 | * | ||
71 | * ffs(value) returns 0 if value is 0 or the position of the first | ||
72 | * set bit if value is nonzero. The first (least significant) bit | ||
73 | * is at position 1. | ||
74 | */ | ||
75 | static inline int ffs(int x) | ||
76 | { | ||
77 | return __builtin_ffs(x); | ||
78 | } | ||
79 | |||
80 | /** | ||
81 | * fls - find last set bit in word | ||
82 | * @x: the word to search | ||
83 | * | ||
84 | * This is defined in a similar way as the libc and compiler builtin | ||
85 | * ffs, but returns the position of the most significant set bit. | ||
86 | * | ||
87 | * fls(value) returns 0 if value is 0 or the position of the last | ||
88 | * set bit if value is nonzero. The last (most significant) bit is | ||
89 | * at position 32. | ||
90 | */ | ||
91 | static inline int fls(int x) | ||
92 | { | ||
93 | return (sizeof(int) * 8) - __builtin_clz(x); | ||
94 | } | ||
95 | |||
96 | static inline int fls64(__u64 w) | ||
97 | { | ||
98 | return (sizeof(__u64) * 8) - __builtin_clzll(w); | ||
99 | } | ||
100 | |||
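A few concrete values may help fix the zero-based vs. one-based conventions above (illustrative comment only, not part of this patch):

/*
 * Examples of the conventions above, e.g. for the value 0x90 (bits 4 and 7 set):
 *   __ffs(0x90)  == 4    (zero-based; undefined for 0)
 *   ffs(0x90)    == 5    (one-based; ffs(0) == 0)
 *   __fls(0x90)  == 7    (zero-based; undefined for 0)
 *   fls(0x90)    == 8    (one-based; fls(0) == 0)
 *   ffz(~0x10UL) == 4    (position of the first zero bit)
 */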
101 | static inline unsigned int hweight32(unsigned int w) | ||
102 | { | ||
103 | return __builtin_popcount(w); | ||
104 | } | ||
105 | |||
106 | static inline unsigned int hweight16(unsigned int w) | ||
107 | { | ||
108 | return __builtin_popcount(w & 0xffff); | ||
109 | } | ||
110 | |||
111 | static inline unsigned int hweight8(unsigned int w) | ||
112 | { | ||
113 | return __builtin_popcount(w & 0xff); | ||
114 | } | ||
115 | |||
116 | static inline unsigned long hweight64(__u64 w) | ||
117 | { | ||
118 | return __builtin_popcountll(w); | ||
119 | } | ||
120 | |||
121 | #include <asm-generic/bitops/lock.h> | ||
122 | #include <asm-generic/bitops/sched.h> | ||
123 | #include <asm-generic/bitops/ext2-non-atomic.h> | ||
124 | #include <asm-generic/bitops/minix.h> | ||
125 | |||
126 | #endif /* _ASM_TILE_BITOPS_H */ | ||
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h new file mode 100644 index 000000000000..7a93c001ac19 --- /dev/null +++ b/arch/tile/include/asm/bitops_32.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_BITOPS_32_H | ||
16 | #define _ASM_TILE_BITOPS_32_H | ||
17 | |||
18 | #include <linux/compiler.h> | ||
19 | #include <asm/atomic.h> | ||
20 | #include <asm/system.h> | ||
21 | |||
22 | /* Tile-specific routines to support <asm/bitops.h>. */ | ||
23 | unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask); | ||
24 | unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask); | ||
25 | unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask); | ||
26 | |||
27 | /** | ||
28 | * set_bit - Atomically set a bit in memory | ||
29 | * @nr: the bit to set | ||
30 | * @addr: the address to start counting from | ||
31 | * | ||
32 | * This function is atomic and may not be reordered. | ||
33 | * See __set_bit() if you do not require the atomic guarantees. | ||
34 | * Note that @nr may be almost arbitrarily large; this function is not | ||
35 | * restricted to acting on a single-word quantity. | ||
36 | */ | ||
37 | static inline void set_bit(unsigned nr, volatile unsigned long *addr) | ||
38 | { | ||
39 | _atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr)); | ||
40 | } | ||
41 | |||
42 | /** | ||
43 | * clear_bit - Clears a bit in memory | ||
44 | * @nr: Bit to clear | ||
45 | * @addr: Address to start counting from | ||
46 | * | ||
47 | * clear_bit() is atomic and may not be reordered. | ||
48 | * See __clear_bit() if you do not require the atomic guarantees. | ||
49 | * Note that @nr may be almost arbitrarily large; this function is not | ||
50 | * restricted to acting on a single-word quantity. | ||
51 | * | ||
52 | * clear_bit() may not contain a memory barrier, so if it is used for | ||
53 | * locking purposes, you should call smp_mb__before_clear_bit() and/or | ||
54 | * smp_mb__after_clear_bit() to ensure changes are visible on other cpus. | ||
55 | */ | ||
56 | static inline void clear_bit(unsigned nr, volatile unsigned long *addr) | ||
57 | { | ||
58 | _atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr)); | ||
59 | } | ||
60 | |||
61 | /** | ||
62 | * change_bit - Toggle a bit in memory | ||
63 | * @nr: Bit to change | ||
64 | * @addr: Address to start counting from | ||
65 | * | ||
66 | * change_bit() is atomic and may not be reordered. | ||
67 | * See __change_bit() if you do not require the atomic guarantees. | ||
68 | * Note that @nr may be almost arbitrarily large; this function is not | ||
69 | * restricted to acting on a single-word quantity. | ||
70 | */ | ||
71 | static inline void change_bit(unsigned nr, volatile unsigned long *addr) | ||
72 | { | ||
73 | _atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr)); | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * test_and_set_bit - Set a bit and return its old value | ||
78 | * @nr: Bit to set | ||
79 | * @addr: Address to count from | ||
80 | * | ||
81 | * This operation is atomic and cannot be reordered. | ||
82 | * It also implies a memory barrier. | ||
83 | */ | ||
84 | static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr) | ||
85 | { | ||
86 | unsigned long mask = BIT_MASK(nr); | ||
87 | addr += BIT_WORD(nr); | ||
88 | smp_mb(); /* barrier for proper semantics */ | ||
89 | return (_atomic_or(addr, mask) & mask) != 0; | ||
90 | } | ||
91 | |||
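As a usage sketch (hypothetical names, not part of this patch), test_and_set_bit() gives the usual claim-once idiom:

/* Sketch: run one-time setup exactly once, even if racing cpus call this. */
extern void example_do_init(void);	/* hypothetical one-time setup */

static inline void example_init_once(unsigned long *inited)
{
	if (!test_and_set_bit(0, inited))
		example_do_init();
}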
92 | /** | ||
93 | * test_and_clear_bit - Clear a bit and return its old value | ||
94 | * @nr: Bit to clear | ||
95 | * @addr: Address to count from | ||
96 | * | ||
97 | * This operation is atomic and cannot be reordered. | ||
98 | * It also implies a memory barrier. | ||
99 | */ | ||
100 | static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr) | ||
101 | { | ||
102 | unsigned long mask = BIT_MASK(nr); | ||
103 | addr += BIT_WORD(nr); | ||
104 | smp_mb(); /* barrier for proper semantics */ | ||
105 | return (_atomic_andn(addr, mask) & mask) != 0; | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * test_and_change_bit - Change a bit and return its old value | ||
110 | * @nr: Bit to change | ||
111 | * @addr: Address to count from | ||
112 | * | ||
113 | * This operation is atomic and cannot be reordered. | ||
114 | * It also implies a memory barrier. | ||
115 | */ | ||
116 | static inline int test_and_change_bit(unsigned nr, | ||
117 | volatile unsigned long *addr) | ||
118 | { | ||
119 | unsigned long mask = BIT_MASK(nr); | ||
120 | addr += BIT_WORD(nr); | ||
121 | smp_mb(); /* barrier for proper semantics */ | ||
122 | return (_atomic_xor(addr, mask) & mask) != 0; | ||
123 | } | ||
124 | |||
125 | /* See discussion at smp_mb__before_atomic_dec() in <asm/atomic.h>. */ | ||
126 | #define smp_mb__before_clear_bit() smp_mb() | ||
127 | #define smp_mb__after_clear_bit() do {} while (0) | ||
128 | |||
129 | #include <asm-generic/bitops/non-atomic.h> | ||
130 | #include <asm-generic/bitops/ext2-atomic.h> | ||
131 | |||
132 | #endif /* _ASM_TILE_BITOPS_32_H */ | ||
diff --git a/arch/tile/include/asm/bitsperlong.h b/arch/tile/include/asm/bitsperlong.h new file mode 100644 index 000000000000..58c771f2af2f --- /dev/null +++ b/arch/tile/include/asm/bitsperlong.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_BITSPERLONG_H | ||
16 | #define _ASM_TILE_BITSPERLONG_H | ||
17 | |||
18 | #ifdef __LP64__ | ||
19 | # define __BITS_PER_LONG 64 | ||
20 | #else | ||
21 | # define __BITS_PER_LONG 32 | ||
22 | #endif | ||
23 | |||
24 | #include <asm-generic/bitsperlong.h> | ||
25 | |||
26 | #endif /* _ASM_TILE_BITSPERLONG_H */ | ||
diff --git a/arch/tile/include/asm/bug.h b/arch/tile/include/asm/bug.h new file mode 100644 index 000000000000..b12fd89e42e9 --- /dev/null +++ b/arch/tile/include/asm/bug.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/bug.h> | |||
diff --git a/arch/tile/include/asm/bugs.h b/arch/tile/include/asm/bugs.h new file mode 100644 index 000000000000..61791e1ad9f5 --- /dev/null +++ b/arch/tile/include/asm/bugs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/bugs.h> | |||
diff --git a/arch/tile/include/asm/byteorder.h b/arch/tile/include/asm/byteorder.h new file mode 100644 index 000000000000..9558416d578b --- /dev/null +++ b/arch/tile/include/asm/byteorder.h | |||
@@ -0,0 +1 @@ | |||
#include <linux/byteorder/little_endian.h> | |||
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h new file mode 100644 index 000000000000..c2b7dcfe5327 --- /dev/null +++ b/arch/tile/include/asm/cache.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CACHE_H | ||
16 | #define _ASM_TILE_CACHE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | /* bytes per L1 data cache line */ | ||
21 | #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE() | ||
22 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | ||
23 | #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1)) & -L1_CACHE_BYTES) | ||
24 | |||
25 | /* bytes per L1 instruction cache line */ | ||
26 | #define L1I_CACHE_SHIFT CHIP_L1I_LOG_LINE_SIZE() | ||
27 | #define L1I_CACHE_BYTES (1 << L1I_CACHE_SHIFT) | ||
28 | #define L1I_CACHE_ALIGN(x) (((x)+(L1I_CACHE_BYTES-1)) & -L1I_CACHE_BYTES) | ||
29 | |||
30 | /* bytes per L2 cache line */ | ||
31 | #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE() | ||
32 | #define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT) | ||
33 | #define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES) | ||
34 | |||
35 | /* use the cache line size for the L2, which is where it counts */ | ||
36 | #define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT | ||
37 | #define SMP_CACHE_BYTES L2_CACHE_BYTES | ||
38 | #define INTERNODE_CACHE_SHIFT L2_CACHE_SHIFT | ||
39 | #define INTERNODE_CACHE_BYTES L2_CACHE_BYTES | ||
40 | |||
41 | /* Group together read-mostly things to avoid cache false sharing */ | ||
42 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | ||
43 | |||
44 | /* | ||
45 | * Attribute for data that is kept read/write coherent until the end of | ||
46 | * initialization, then bumped to read/only incoherent for performance. | ||
47 | */ | ||
48 | #define __write_once __attribute__((__section__(".w1data"))) | ||
49 | |||
50 | #endif /* _ASM_TILE_CACHE_H */ | ||
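
The *_ALIGN() macros above round a value up to the next cache-line multiple by adding line-size minus one and masking with the negated line size. A small userspace sketch of that arithmetic, assuming a fixed 64-byte line in place of the CHIP_*_LOG_LINE_SIZE() values from <arch/chip.h>:

#include <stdio.h>

#define L2_CACHE_SHIFT    6                               /* assumed 64-byte lines */
#define L2_CACHE_BYTES    (1 << L2_CACHE_SHIFT)
#define L2_CACHE_ALIGN(x) (((x) + (L2_CACHE_BYTES - 1)) & -L2_CACHE_BYTES)

int main(void)
{
        /* Sizes round up to the next multiple of the line size. */
        printf("%4d -> %4d\n",  1, L2_CACHE_ALIGN(1));    /* 64  */
        printf("%4d -> %4d\n", 64, L2_CACHE_ALIGN(64));   /* 64  */
        printf("%4d -> %4d\n", 65, L2_CACHE_ALIGN(65));   /* 128 */
        return 0;
}
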
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h new file mode 100644 index 000000000000..7e2096a4ef7d --- /dev/null +++ b/arch/tile/include/asm/cacheflush.h | |||
@@ -0,0 +1,145 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CACHEFLUSH_H | ||
16 | #define _ASM_TILE_CACHEFLUSH_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | /* Keep includes the same across arches. */ | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/cache.h> | ||
23 | #include <asm/system.h> | ||
24 | |||
25 | /* Caches are physically-indexed and so don't need special treatment */ | ||
26 | #define flush_cache_all() do { } while (0) | ||
27 | #define flush_cache_mm(mm) do { } while (0) | ||
28 | #define flush_cache_dup_mm(mm) do { } while (0) | ||
29 | #define flush_cache_range(vma, start, end) do { } while (0) | ||
30 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | ||
31 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
32 | #define flush_dcache_page(page) do { } while (0) | ||
33 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | ||
34 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | ||
35 | #define flush_cache_vmap(start, end) do { } while (0) | ||
36 | #define flush_cache_vunmap(start, end) do { } while (0) | ||
37 | #define flush_icache_page(vma, pg) do { } while (0) | ||
38 | #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) | ||
39 | |||
40 | /* See "arch/tile/lib/__invalidate_icache.S". */ | ||
41 | extern void __invalidate_icache(unsigned long start, unsigned long size); | ||
42 | |||
43 | /* Flush the icache just on this cpu */ | ||
44 | static inline void __flush_icache_range(unsigned long start, unsigned long end) | ||
45 | { | ||
46 | __invalidate_icache(start, end - start); | ||
47 | } | ||
48 | |||
49 | /* Flush the entire icache on this cpu. */ | ||
50 | #define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE()) | ||
51 | |||
52 | #ifdef CONFIG_SMP | ||
53 | /* | ||
54 | * When the kernel writes to its own text we need to do an SMP | ||
55 | * broadcast to make the L1I coherent everywhere. This includes | ||
56 | * module load and single step. | ||
57 | */ | ||
58 | extern void flush_icache_range(unsigned long start, unsigned long end); | ||
59 | #else | ||
60 | #define flush_icache_range __flush_icache_range | ||
61 | #endif | ||
62 | |||
63 | /* | ||
64 | * An update to an executable user page requires icache flushing. | ||
65 | * We could carefully update only tiles that are running this process, | ||
66 | * and rely on the fact that we flush the icache on every context | ||
67 | * switch to avoid doing extra work here. But for now, I'll be | ||
68 | * conservative and just do a global icache flush. | ||
69 | */ | ||
70 | static inline void copy_to_user_page(struct vm_area_struct *vma, | ||
71 | struct page *page, unsigned long vaddr, | ||
72 | void *dst, void *src, int len) | ||
73 | { | ||
74 | memcpy(dst, src, len); | ||
75 | if (vma->vm_flags & VM_EXEC) { | ||
76 | flush_icache_range((unsigned long) dst, | ||
77 | (unsigned long) dst + len); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | ||
82 | memcpy((dst), (src), (len)) | ||
83 | |||
84 | /* | ||
85 | * Invalidate a VA range; pads to L2 cacheline boundaries. | ||
86 | * | ||
87 | * Note that on TILE64, __inv_buffer() actually flushes modified | ||
88 | * cache lines in addition to invalidating them, i.e., it's the | ||
89 | * same as __finv_buffer(). | ||
90 | */ | ||
91 | static inline void __inv_buffer(void *buffer, size_t size) | ||
92 | { | ||
93 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
94 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
95 | while (next < finish) { | ||
96 | __insn_inv(next); | ||
97 | next += CHIP_INV_STRIDE(); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /* Flush a VA range; pads to L2 cacheline boundaries. */ | ||
102 | static inline void __flush_buffer(void *buffer, size_t size) | ||
103 | { | ||
104 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
105 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
106 | while (next < finish) { | ||
107 | __insn_flush(next); | ||
108 | next += CHIP_FLUSH_STRIDE(); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | /* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */ | ||
113 | static inline void __finv_buffer(void *buffer, size_t size) | ||
114 | { | ||
115 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
116 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
117 | while (next < finish) { | ||
118 | __insn_finv(next); | ||
119 | next += CHIP_FINV_STRIDE(); | ||
120 | } | ||
121 | } | ||
122 | |||
123 | |||
124 | /* Invalidate a VA range, then memory fence. */ | ||
125 | static inline void inv_buffer(void *buffer, size_t size) | ||
126 | { | ||
127 | __inv_buffer(buffer, size); | ||
128 | mb_incoherent(); | ||
129 | } | ||
130 | |||
131 | /* Flush a VA range, then memory fence. */ | ||
132 | static inline void flush_buffer(void *buffer, size_t size) | ||
133 | { | ||
134 | __flush_buffer(buffer, size); | ||
135 | mb_incoherent(); | ||
136 | } | ||
137 | |||
138 | /* Flush & invalidate a VA range, then memory fence. */ | ||
139 | static inline void finv_buffer(void *buffer, size_t size) | ||
140 | { | ||
141 | __finv_buffer(buffer, size); | ||
142 | mb_incoherent(); | ||
143 | } | ||
144 | |||
145 | #endif /* _ASM_TILE_CACHEFLUSH_H */ | ||
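
The three __*_buffer() helpers share one walk: round the start address down to an L2 line, round the end up with L2_CACHE_ALIGN(), then step through the range at the chip's stride issuing one cache instruction per line. A userspace sketch of just that address arithmetic; visit() is a hypothetical stand-in for __insn_inv()/__insn_flush()/__insn_finv(), and the fixed 64-byte line and stride are assumptions.

#include <stdio.h>
#include <stddef.h>

#define L2_CACHE_BYTES    64                               /* assumption */
#define L2_CACHE_ALIGN(x) (((x) + (L2_CACHE_BYTES - 1)) & -L2_CACHE_BYTES)

/* Hypothetical stand-in for __insn_inv()/__insn_flush()/__insn_finv(). */
static void visit(char *line)
{
        printf("cache op at %p\n", (void *)line);
}

static void walk_buffer(void *buffer, size_t size)
{
        char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
        char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);

        while (next < finish) {
                visit(next);
                next += L2_CACHE_BYTES;            /* CHIP_*_STRIDE() stand-in */
        }
}

int main(void)
{
        char buf[200];

        /* Touches every cache line overlapping the requested range. */
        walk_buffer(buf + 3, 130);
        return 0;
}
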
diff --git a/arch/tile/include/asm/checksum.h b/arch/tile/include/asm/checksum.h new file mode 100644 index 000000000000..a120766c7264 --- /dev/null +++ b/arch/tile/include/asm/checksum.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CHECKSUM_H | ||
16 | #define _ASM_TILE_CHECKSUM_H | ||
17 | |||
18 | #include <asm-generic/checksum.h> | ||
19 | |||
20 | /* Allow us to provide a more optimized do_csum(). */ | ||
21 | __wsum do_csum(const unsigned char *buff, int len); | ||
22 | #define do_csum do_csum | ||
23 | |||
24 | #endif /* _ASM_TILE_CHECKSUM_H */ | ||
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h new file mode 100644 index 000000000000..e133c53f6c4f --- /dev/null +++ b/arch/tile/include/asm/compat.h | |||
@@ -0,0 +1,308 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_COMPAT_H | ||
16 | #define _ASM_TILE_COMPAT_H | ||
17 | |||
18 | /* | ||
19 | * Architecture specific compatibility types | ||
20 | */ | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/sched.h> | ||
23 | |||
24 | #define COMPAT_USER_HZ 100 | ||
25 | |||
26 | /* "long" and pointer-based types are different. */ | ||
27 | typedef s32 compat_long_t; | ||
28 | typedef u32 compat_ulong_t; | ||
29 | typedef u32 compat_size_t; | ||
30 | typedef s32 compat_ssize_t; | ||
31 | typedef s32 compat_off_t; | ||
32 | typedef s32 compat_time_t; | ||
33 | typedef s32 compat_clock_t; | ||
34 | typedef u32 compat_ino_t; | ||
35 | typedef u32 compat_caddr_t; | ||
36 | typedef u32 compat_uptr_t; | ||
37 | |||
38 | /* Many types are "int" or otherwise the same. */ | ||
39 | typedef __kernel_pid_t compat_pid_t; | ||
40 | typedef __kernel_uid_t __compat_uid_t; | ||
41 | typedef __kernel_gid_t __compat_gid_t; | ||
42 | typedef __kernel_uid32_t __compat_uid32_t; | ||
43 | typedef __kernel_gid32_t __compat_gid32_t; | ||
44 | typedef __kernel_mode_t compat_mode_t; | ||
45 | typedef __kernel_dev_t compat_dev_t; | ||
46 | typedef __kernel_loff_t compat_loff_t; | ||
47 | typedef __kernel_nlink_t compat_nlink_t; | ||
48 | typedef __kernel_ipc_pid_t compat_ipc_pid_t; | ||
49 | typedef __kernel_daddr_t compat_daddr_t; | ||
50 | typedef __kernel_fsid_t compat_fsid_t; | ||
51 | typedef __kernel_timer_t compat_timer_t; | ||
52 | typedef __kernel_key_t compat_key_t; | ||
53 | typedef int compat_int_t; | ||
54 | typedef s64 compat_s64; | ||
55 | typedef uint compat_uint_t; | ||
56 | typedef u64 compat_u64; | ||
57 | |||
58 | /* We use the same register dump format in 32-bit images. */ | ||
59 | typedef unsigned long compat_elf_greg_t; | ||
60 | #define COMPAT_ELF_NGREG (sizeof(struct pt_regs) / sizeof(compat_elf_greg_t)) | ||
61 | typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; | ||
62 | |||
63 | struct compat_timespec { | ||
64 | compat_time_t tv_sec; | ||
65 | s32 tv_nsec; | ||
66 | }; | ||
67 | |||
68 | struct compat_timeval { | ||
69 | compat_time_t tv_sec; | ||
70 | s32 tv_usec; | ||
71 | }; | ||
72 | |||
73 | struct compat_stat { | ||
74 | unsigned int st_dev; | ||
75 | unsigned int st_ino; | ||
76 | unsigned int st_mode; | ||
77 | unsigned int st_nlink; | ||
78 | unsigned int st_uid; | ||
79 | unsigned int st_gid; | ||
80 | unsigned int st_rdev; | ||
81 | unsigned int __pad1; | ||
82 | int st_size; | ||
83 | int st_blksize; | ||
84 | int __pad2; | ||
85 | int st_blocks; | ||
86 | int st_atime; | ||
87 | unsigned int st_atime_nsec; | ||
88 | int st_mtime; | ||
89 | unsigned int st_mtime_nsec; | ||
90 | int st_ctime; | ||
91 | unsigned int st_ctime_nsec; | ||
92 | unsigned int __unused[2]; | ||
93 | }; | ||
94 | |||
95 | struct compat_stat64 { | ||
96 | unsigned long st_dev; | ||
97 | unsigned long st_ino; | ||
98 | unsigned int st_mode; | ||
99 | unsigned int st_nlink; | ||
100 | unsigned int st_uid; | ||
101 | unsigned int st_gid; | ||
102 | unsigned long st_rdev; | ||
103 | long st_size; | ||
104 | unsigned int st_blksize; | ||
105 | unsigned long st_blocks __attribute__((packed)); | ||
106 | unsigned int st_atime; | ||
107 | unsigned int st_atime_nsec; | ||
108 | unsigned int st_mtime; | ||
109 | unsigned int st_mtime_nsec; | ||
110 | unsigned int st_ctime; | ||
111 | unsigned int st_ctime_nsec; | ||
112 | unsigned int __unused8; | ||
113 | }; | ||
114 | |||
115 | #define compat_statfs statfs | ||
116 | |||
117 | struct compat_sysctl { | ||
118 | unsigned int name; | ||
119 | int nlen; | ||
120 | unsigned int oldval; | ||
121 | unsigned int oldlenp; | ||
122 | unsigned int newval; | ||
123 | unsigned int newlen; | ||
124 | unsigned int __unused[4]; | ||
125 | }; | ||
126 | |||
127 | |||
128 | struct compat_flock { | ||
129 | short l_type; | ||
130 | short l_whence; | ||
131 | compat_off_t l_start; | ||
132 | compat_off_t l_len; | ||
133 | compat_pid_t l_pid; | ||
134 | }; | ||
135 | |||
136 | #define F_GETLK64 12 /* using 'struct flock64' */ | ||
137 | #define F_SETLK64 13 | ||
138 | #define F_SETLKW64 14 | ||
139 | |||
140 | struct compat_flock64 { | ||
141 | short l_type; | ||
142 | short l_whence; | ||
143 | compat_loff_t l_start; | ||
144 | compat_loff_t l_len; | ||
145 | compat_pid_t l_pid; | ||
146 | }; | ||
147 | |||
148 | #define COMPAT_RLIM_INFINITY 0xffffffff | ||
149 | |||
150 | #define _COMPAT_NSIG 64 | ||
151 | #define _COMPAT_NSIG_BPW 32 | ||
152 | |||
153 | typedef u32 compat_sigset_word; | ||
154 | |||
155 | #define COMPAT_OFF_T_MAX 0x7fffffff | ||
156 | #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL | ||
157 | |||
158 | struct compat_ipc64_perm { | ||
159 | compat_key_t key; | ||
160 | __compat_uid32_t uid; | ||
161 | __compat_gid32_t gid; | ||
162 | __compat_uid32_t cuid; | ||
163 | __compat_gid32_t cgid; | ||
164 | unsigned short mode; | ||
165 | unsigned short __pad1; | ||
166 | unsigned short seq; | ||
167 | unsigned short __pad2; | ||
168 | compat_ulong_t unused1; | ||
169 | compat_ulong_t unused2; | ||
170 | }; | ||
171 | |||
172 | struct compat_semid64_ds { | ||
173 | struct compat_ipc64_perm sem_perm; | ||
174 | compat_time_t sem_otime; | ||
175 | compat_ulong_t __unused1; | ||
176 | compat_time_t sem_ctime; | ||
177 | compat_ulong_t __unused2; | ||
178 | compat_ulong_t sem_nsems; | ||
179 | compat_ulong_t __unused3; | ||
180 | compat_ulong_t __unused4; | ||
181 | }; | ||
182 | |||
183 | struct compat_msqid64_ds { | ||
184 | struct compat_ipc64_perm msg_perm; | ||
185 | compat_time_t msg_stime; | ||
186 | compat_ulong_t __unused1; | ||
187 | compat_time_t msg_rtime; | ||
188 | compat_ulong_t __unused2; | ||
189 | compat_time_t msg_ctime; | ||
190 | compat_ulong_t __unused3; | ||
191 | compat_ulong_t msg_cbytes; | ||
192 | compat_ulong_t msg_qnum; | ||
193 | compat_ulong_t msg_qbytes; | ||
194 | compat_pid_t msg_lspid; | ||
195 | compat_pid_t msg_lrpid; | ||
196 | compat_ulong_t __unused4; | ||
197 | compat_ulong_t __unused5; | ||
198 | }; | ||
199 | |||
200 | struct compat_shmid64_ds { | ||
201 | struct compat_ipc64_perm shm_perm; | ||
202 | compat_size_t shm_segsz; | ||
203 | compat_time_t shm_atime; | ||
204 | compat_ulong_t __unused1; | ||
205 | compat_time_t shm_dtime; | ||
206 | compat_ulong_t __unused2; | ||
207 | compat_time_t shm_ctime; | ||
208 | compat_ulong_t __unused3; | ||
209 | compat_pid_t shm_cpid; | ||
210 | compat_pid_t shm_lpid; | ||
211 | compat_ulong_t shm_nattch; | ||
212 | compat_ulong_t __unused4; | ||
213 | compat_ulong_t __unused5; | ||
214 | }; | ||
215 | |||
216 | /* | ||
217 | * A pointer passed in from user mode. This should not | ||
218 | * be used for syscall parameters; just declare them | ||
219 | * as pointers because the syscall entry code will have | ||
220 | * appropriately converted them already. | ||
221 | */ | ||
222 | |||
223 | static inline void __user *compat_ptr(compat_uptr_t uptr) | ||
224 | { | ||
225 | return (void __user *)(unsigned long)uptr; | ||
226 | } | ||
227 | |||
228 | static inline compat_uptr_t ptr_to_compat(void __user *uptr) | ||
229 | { | ||
230 | return (u32)(unsigned long)uptr; | ||
231 | } | ||
232 | |||
233 | /* Sign-extend when storing a kernel pointer to a user's ptregs. */ | ||
234 | static inline unsigned long ptr_to_compat_reg(void __user *uptr) | ||
235 | { | ||
236 | return (long)(int)(long)uptr; | ||
237 | } | ||
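
The three pointer helpers above differ only in the integer conversion they perform: compat_ptr() zero-extends a 32-bit user value, ptr_to_compat() truncates a kernel pointer to 32 bits, and ptr_to_compat_reg() sign-extends so the value is stored canonically in the user's register image. A userspace sketch of the same casts, using plain void * in place of void __user *; the demo_* names are illustrative and not part of the patch.

#include <stdio.h>
#include <stdint.h>

typedef uint32_t compat_uptr_t;

static void *demo_compat_ptr(compat_uptr_t uptr)
{
        return (void *)(unsigned long)uptr;          /* zero-extend to 64 bits */
}

static compat_uptr_t demo_ptr_to_compat(void *uptr)
{
        return (uint32_t)(unsigned long)uptr;        /* truncate to 32 bits */
}

static unsigned long demo_ptr_to_compat_reg(void *uptr)
{
        return (long)(int)(long)uptr;                /* sign-extend bit 31 */
}

int main(void)
{
        void *p = (void *)0x80001234UL;              /* high bit of the low word set */

        printf("compat_ptr:        %p\n",   demo_compat_ptr(0x80001234));
        printf("ptr_to_compat:     %#x\n",  demo_ptr_to_compat(p));
        printf("ptr_to_compat_reg: %#lx\n", demo_ptr_to_compat_reg(p));
        return 0;
}
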
238 | |||
239 | static inline void __user *compat_alloc_user_space(long len) | ||
240 | { | ||
241 | struct pt_regs *regs = task_pt_regs(current); | ||
242 | return (void __user *)regs->sp - len; | ||
243 | } | ||
244 | |||
245 | static inline int is_compat_task(void) | ||
246 | { | ||
247 | return current_thread_info()->status & TS_COMPAT; | ||
248 | } | ||
249 | |||
250 | extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka, | ||
251 | siginfo_t *info, sigset_t *set, | ||
252 | struct pt_regs *regs); | ||
253 | |||
254 | /* Compat syscalls. */ | ||
255 | struct compat_sigaction; | ||
256 | struct compat_siginfo; | ||
257 | struct compat_sigaltstack; | ||
258 | long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | ||
259 | compat_uptr_t __user *envp); | ||
260 | long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, | ||
261 | struct compat_sigaction __user *oact, | ||
262 | size_t sigsetsize); | ||
263 | long compat_sys_rt_sigqueueinfo(int pid, int sig, | ||
264 | struct compat_siginfo __user *uinfo); | ||
265 | long compat_sys_rt_sigreturn(void); | ||
266 | long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, | ||
267 | struct compat_sigaltstack __user *uoss_ptr); | ||
268 | long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high); | ||
269 | long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high); | ||
270 | long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count, | ||
271 | u32 dummy, u32 low, u32 high); | ||
272 | long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count, | ||
273 | u32 dummy, u32 low, u32 high); | ||
274 | long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len); | ||
275 | long compat_sys_sync_file_range2(int fd, unsigned int flags, | ||
276 | u32 offset_lo, u32 offset_hi, | ||
277 | u32 nbytes_lo, u32 nbytes_hi); | ||
278 | long compat_sys_fallocate(int fd, int mode, | ||
279 | u32 offset_lo, u32 offset_hi, | ||
280 | u32 len_lo, u32 len_hi); | ||
281 | long compat_sys_stat64(char __user *filename, | ||
282 | struct compat_stat64 __user *statbuf); | ||
283 | long compat_sys_lstat64(char __user *filename, | ||
284 | struct compat_stat64 __user *statbuf); | ||
285 | long compat_sys_fstat64(unsigned int fd, struct compat_stat64 __user *statbuf); | ||
286 | long compat_sys_fstatat64(int dfd, char __user *filename, | ||
287 | struct compat_stat64 __user *statbuf, int flag); | ||
288 | long compat_sys_sched_rr_get_interval(compat_pid_t pid, | ||
289 | struct compat_timespec __user *interval); | ||
290 | ssize_t compat_sys_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, | ||
291 | size_t count); | ||
292 | |||
293 | /* Versions of compat functions that differ from generic Linux. */ | ||
294 | struct compat_msgbuf; | ||
295 | long tile_compat_sys_msgsnd(int msqid, | ||
296 | struct compat_msgbuf __user *msgp, | ||
297 | size_t msgsz, int msgflg); | ||
298 | long tile_compat_sys_msgrcv(int msqid, | ||
299 | struct compat_msgbuf __user *msgp, | ||
300 | size_t msgsz, long msgtyp, int msgflg); | ||
301 | long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid, | ||
302 | compat_long_t addr, compat_long_t data); | ||
303 | |||
304 | /* Tilera Linux syscalls that don't have "compat" versions. */ | ||
305 | #define compat_sys_raise_fpe sys_raise_fpe | ||
306 | #define compat_sys_flush_cache sys_flush_cache | ||
307 | |||
308 | #endif /* _ASM_TILE_COMPAT_H */ | ||
diff --git a/arch/tile/include/asm/cputime.h b/arch/tile/include/asm/cputime.h new file mode 100644 index 000000000000..6d68ad7e0ea3 --- /dev/null +++ b/arch/tile/include/asm/cputime.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/cputime.h> | |||
diff --git a/arch/tile/include/asm/current.h b/arch/tile/include/asm/current.h new file mode 100644 index 000000000000..da21acf020d3 --- /dev/null +++ b/arch/tile/include/asm/current.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CURRENT_H | ||
16 | #define _ASM_TILE_CURRENT_H | ||
17 | |||
18 | #include <linux/thread_info.h> | ||
19 | |||
20 | struct task_struct; | ||
21 | |||
22 | static inline struct task_struct *get_current(void) | ||
23 | { | ||
24 | return current_thread_info()->task; | ||
25 | } | ||
26 | #define current get_current() | ||
27 | |||
28 | /* Return a usable "task_struct" pointer even if the real one is corrupt. */ | ||
29 | struct task_struct *validate_current(void); | ||
30 | |||
31 | #endif /* _ASM_TILE_CURRENT_H */ | ||
diff --git a/arch/tile/include/asm/delay.h b/arch/tile/include/asm/delay.h new file mode 100644 index 000000000000..97b0e69e704e --- /dev/null +++ b/arch/tile/include/asm/delay.h | |||
@@ -0,0 +1,34 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DELAY_H | ||
16 | #define _ASM_TILE_DELAY_H | ||
17 | |||
18 | /* Undefined functions to get compile-time errors. */ | ||
19 | extern void __bad_udelay(void); | ||
20 | extern void __bad_ndelay(void); | ||
21 | |||
22 | extern void __udelay(unsigned long usecs); | ||
23 | extern void __ndelay(unsigned long nsecs); | ||
24 | extern void __delay(unsigned long loops); | ||
25 | |||
26 | #define udelay(n) (__builtin_constant_p(n) ? \ | ||
27 | ((n) > 20000 ? __bad_udelay() : __ndelay((n) * 1000)) : \ | ||
28 | __udelay(n)) | ||
29 | |||
30 | #define ndelay(n) (__builtin_constant_p(n) ? \ | ||
31 | ((n) > 20000 ? __bad_ndelay() : __ndelay(n)) : \ | ||
32 | __ndelay(n)) | ||
33 | |||
34 | #endif /* _ASM_TILE_DELAY_H */ | ||
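
udelay()/ndelay() use __builtin_constant_p() to route out-of-range constant delays to __bad_udelay()/__bad_ndelay(), which are declared but never defined, so the build fails at link time instead of silently spinning far too long. A sketch of the same trick outside the kernel, with hypothetical names; as in the kernel build, it relies on the compiler folding away the dead branch.

/* Declared but intentionally never defined: referencing it breaks the link. */
extern void __bad_wait(void);

void __spin_loops(unsigned long loops);       /* ordinary runtime path */

#define wait_cycles(n)                                          \
        (__builtin_constant_p(n) && (n) > 20000 ?               \
                __bad_wait() : __spin_loops(n))

void __spin_loops(unsigned long loops)
{
        while (loops--)
                __asm__ volatile ("" ::: "memory");   /* keep the loop */
}

int main(void)
{
        wait_cycles(100);        /* constant and in range: links fine */
        /* wait_cycles(50000);      constant > 20000: link error on __bad_wait */
        return 0;
}
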
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h new file mode 100644 index 000000000000..f0a4c256403b --- /dev/null +++ b/arch/tile/include/asm/device.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/device.h> | |||
diff --git a/arch/tile/include/asm/div64.h b/arch/tile/include/asm/div64.h new file mode 100644 index 000000000000..6cd978cefb28 --- /dev/null +++ b/arch/tile/include/asm/div64.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/div64.h> | |||
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h new file mode 100644 index 000000000000..cf466b39aa13 --- /dev/null +++ b/arch/tile/include/asm/dma-mapping.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DMA_MAPPING_H | ||
16 | #define _ASM_TILE_DMA_MAPPING_H | ||
17 | |||
18 | #include <linux/mm.h> | ||
19 | #include <linux/scatterlist.h> | ||
20 | #include <linux/cache.h> | ||
21 | #include <linux/io.h> | ||
22 | |||
23 | /* | ||
24 | * Note that on x86 and powerpc, there is a "struct dma_mapping_ops" | ||
25 | * that is used for all the DMA operations. For now, we don't have an | ||
26 | * equivalent on tile, because we only have a single way of doing DMA. | ||
27 | * (Tilera bug 7994 to use dma_mapping_ops.) | ||
28 | */ | ||
29 | |||
30 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
31 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
32 | |||
33 | extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||
34 | enum dma_data_direction); | ||
35 | extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
36 | size_t size, enum dma_data_direction); | ||
37 | extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
38 | enum dma_data_direction); | ||
39 | extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
40 | int nhwentries, enum dma_data_direction); | ||
41 | extern dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
42 | unsigned long offset, size_t size, | ||
43 | enum dma_data_direction); | ||
44 | extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
45 | size_t size, enum dma_data_direction); | ||
46 | extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
47 | int nelems, enum dma_data_direction); | ||
48 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
49 | int nelems, enum dma_data_direction); | ||
50 | |||
51 | |||
52 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
53 | dma_addr_t *dma_handle, gfp_t flag); | ||
54 | |||
55 | void dma_free_coherent(struct device *dev, size_t size, | ||
56 | void *vaddr, dma_addr_t dma_handle); | ||
57 | |||
58 | extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, | ||
59 | enum dma_data_direction); | ||
60 | extern void dma_sync_single_for_device(struct device *, dma_addr_t, | ||
61 | size_t, enum dma_data_direction); | ||
62 | extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, | ||
63 | unsigned long offset, size_t, | ||
64 | enum dma_data_direction); | ||
65 | extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, | ||
66 | unsigned long offset, size_t, | ||
67 | enum dma_data_direction); | ||
68 | extern void dma_cache_sync(void *vaddr, size_t, enum dma_data_direction); | ||
69 | |||
70 | static inline int | ||
71 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
72 | { | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static inline int | ||
77 | dma_supported(struct device *dev, u64 mask) | ||
78 | { | ||
79 | return 1; | ||
80 | } | ||
81 | |||
82 | static inline int | ||
83 | dma_set_mask(struct device *dev, u64 mask) | ||
84 | { | ||
85 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
86 | return -EIO; | ||
87 | |||
88 | *dev->dma_mask = mask; | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static inline int | ||
94 | dma_get_cache_alignment(void) | ||
95 | { | ||
96 | return L2_CACHE_BYTES; | ||
97 | } | ||
98 | |||
99 | #define dma_is_consistent(d, h) (1) | ||
100 | |||
101 | |||
102 | #endif /* _ASM_TILE_DMA_MAPPING_H */ | ||
diff --git a/arch/tile/include/asm/dma.h b/arch/tile/include/asm/dma.h new file mode 100644 index 000000000000..12a7ca16d164 --- /dev/null +++ b/arch/tile/include/asm/dma.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DMA_H | ||
16 | #define _ASM_TILE_DMA_H | ||
17 | |||
18 | #include <asm-generic/dma.h> | ||
19 | |||
20 | /* Needed by drivers/pci/quirks.c */ | ||
21 | #ifdef CONFIG_PCI | ||
22 | extern int isa_dma_bridge_buggy; | ||
23 | #endif | ||
24 | |||
25 | #endif /* _ASM_TILE_DMA_H */ | ||
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h new file mode 100644 index 000000000000..1bca0debdb0f --- /dev/null +++ b/arch/tile/include/asm/elf.h | |||
@@ -0,0 +1,169 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_ELF_H | ||
16 | #define _ASM_TILE_ELF_H | ||
17 | |||
18 | /* | ||
19 | * ELF register definitions. | ||
20 | */ | ||
21 | |||
22 | #include <arch/chip.h> | ||
23 | |||
24 | #include <linux/ptrace.h> | ||
25 | #include <asm/byteorder.h> | ||
26 | #include <asm/page.h> | ||
27 | |||
28 | typedef unsigned long elf_greg_t; | ||
29 | |||
30 | #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) | ||
31 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
32 | |||
33 | #define EM_TILE64 187 | ||
34 | #define EM_TILEPRO 188 | ||
35 | #define EM_TILEGX 191 | ||
36 | |||
37 | /* Provide a nominal data structure. */ | ||
38 | #define ELF_NFPREG 0 | ||
39 | typedef double elf_fpreg_t; | ||
40 | typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | ||
41 | |||
42 | #ifdef __tilegx__ | ||
43 | #define ELF_CLASS ELFCLASS64 | ||
44 | #else | ||
45 | #define ELF_CLASS ELFCLASS32 | ||
46 | #endif | ||
47 | #define ELF_DATA ELFDATA2LSB | ||
48 | |||
49 | /* | ||
50 | * There seems to be a bug in how compat_binfmt_elf.c works: it | ||
51 | * #undefs ELF_ARCH, but it is then used in binfmt_elf.c for fill_note_info(). | ||
52 | * Hack around this by providing an enum value of ELF_ARCH. | ||
53 | */ | ||
54 | enum { ELF_ARCH = CHIP_ELF_TYPE() }; | ||
55 | #define ELF_ARCH ELF_ARCH | ||
56 | |||
57 | /* | ||
58 | * This is used to ensure we don't load something for the wrong architecture. | ||
59 | */ | ||
60 | #define elf_check_arch(x) \ | ||
61 | ((x)->e_ident[EI_CLASS] == ELF_CLASS && \ | ||
62 | ((x)->e_machine == CHIP_ELF_TYPE() || \ | ||
63 | (x)->e_machine == CHIP_COMPAT_ELF_TYPE())) | ||
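
elf_check_arch() accepts a binary whose ELF class matches the kernel and whose e_machine is either the chip's own type or its compatible predecessor. A userspace sketch of an equivalent check on a file header; hard-coding EM_TILEPRO/EM_TILE64 in place of CHIP_ELF_TYPE()/CHIP_COMPAT_ELF_TYPE() is an assumption, since <arch/chip.h> is not available outside the tile tree.

#include <stdio.h>
#include <string.h>
#include <elf.h>

#define EM_TILE64_LOCAL  187     /* matches the defines above */
#define EM_TILEPRO_LOCAL 188

/* Roughly what elf_check_arch() does for a 32-bit TILEPro kernel. */
static int demo_check_arch(const Elf32_Ehdr *hdr)
{
        return memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0 &&
               hdr->e_ident[EI_CLASS] == ELFCLASS32 &&
               (hdr->e_machine == EM_TILEPRO_LOCAL ||
                hdr->e_machine == EM_TILE64_LOCAL);
}

int main(int argc, char **argv)
{
        Elf32_Ehdr hdr;
        FILE *f = argc > 1 ? fopen(argv[1], "rb") : NULL;

        if (!f || fread(&hdr, sizeof(hdr), 1, f) != 1) {
                fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
                return 1;
        }
        printf("loadable on tilepro: %s\n",
               demo_check_arch(&hdr) ? "yes" : "no");
        fclose(f);
        return 0;
}
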
64 | |||
65 | /* The module loader only handles a few relocation types. */ | ||
66 | #ifndef __tilegx__ | ||
67 | #define R_TILE_32 1 | ||
68 | #define R_TILE_JOFFLONG_X1 15 | ||
69 | #define R_TILE_IMM16_X0_LO 25 | ||
70 | #define R_TILE_IMM16_X1_LO 26 | ||
71 | #define R_TILE_IMM16_X0_HA 29 | ||
72 | #define R_TILE_IMM16_X1_HA 30 | ||
73 | #else | ||
74 | #define R_TILEGX_64 1 | ||
75 | #define R_TILEGX_JUMPOFF_X1 21 | ||
76 | #define R_TILEGX_IMM16_X0_HW0 36 | ||
77 | #define R_TILEGX_IMM16_X1_HW0 37 | ||
78 | #define R_TILEGX_IMM16_X0_HW1 38 | ||
79 | #define R_TILEGX_IMM16_X1_HW1 39 | ||
80 | #define R_TILEGX_IMM16_X0_HW2_LAST 48 | ||
81 | #define R_TILEGX_IMM16_X1_HW2_LAST 49 | ||
82 | #endif | ||
83 | |||
84 | /* Use standard page size for core dumps. */ | ||
85 | #define ELF_EXEC_PAGESIZE PAGE_SIZE | ||
86 | |||
87 | /* | ||
88 | * This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
89 | * use of this is to invoke "./ld.so someprog" to test out a new version of | ||
90 | * the loader. We need to make sure that it is out of the way of the program | ||
91 | * that it will "exec", and that there is sufficient room for the brk. | ||
92 | */ | ||
93 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | ||
94 | |||
95 | #define ELF_CORE_COPY_REGS(_dest, _regs) \ | ||
96 | memcpy((char *) &_dest, (char *) _regs, \ | ||
97 | sizeof(struct pt_regs)); | ||
98 | |||
99 | /* No additional FP registers to copy. */ | ||
100 | #define ELF_CORE_COPY_FPREGS(t, fpu) 0 | ||
101 | |||
102 | /* | ||
103 | * This yields a mask that user programs can use to figure out what | ||
104 | * instruction set this CPU supports. This could be done in user space, | ||
105 | * but it's not easy, and we've already done it here. | ||
106 | */ | ||
107 | #define ELF_HWCAP (0) | ||
108 | |||
109 | /* | ||
110 | * This yields a string that ld.so will use to load implementation | ||
111 | * specific libraries for optimization. This is more specific in | ||
112 | * intent than poking at uname or /proc/cpuinfo. | ||
113 | */ | ||
114 | #define ELF_PLATFORM (NULL) | ||
115 | |||
116 | extern void elf_plat_init(struct pt_regs *regs, unsigned long load_addr); | ||
117 | |||
118 | #define ELF_PLAT_INIT(_r, load_addr) elf_plat_init(_r, load_addr) | ||
119 | |||
120 | extern int dump_task_regs(struct task_struct *, elf_gregset_t *); | ||
121 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) | ||
122 | |||
123 | /* Tilera Linux has no personalities currently, so no need to do anything. */ | ||
124 | #define SET_PERSONALITY(ex) do { } while (0) | ||
125 | |||
126 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES | ||
127 | /* Support auto-mapping of the user interrupt vectors. */ | ||
128 | struct linux_binprm; | ||
129 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | ||
130 | int executable_stack); | ||
131 | #ifdef CONFIG_COMPAT | ||
132 | |||
133 | #define COMPAT_ELF_PLATFORM "tilegx-m32" | ||
134 | |||
135 | /* | ||
136 | * "Compat" binaries have the same machine type, but 32-bit class, | ||
137 | * since they're not a separate machine type, but just a 32-bit | ||
138 | * variant of the standard 64-bit architecture. | ||
139 | */ | ||
140 | #define compat_elf_check_arch(x) \ | ||
141 | ((x)->e_ident[EI_CLASS] == ELFCLASS32 && \ | ||
142 | ((x)->e_machine == CHIP_ELF_TYPE() || \ | ||
143 | (x)->e_machine == CHIP_COMPAT_ELF_TYPE())) | ||
144 | |||
145 | #define compat_start_thread(regs, ip, usp) do { \ | ||
146 | regs->pc = ptr_to_compat_reg((void *)(ip)); \ | ||
147 | regs->sp = ptr_to_compat_reg((void *)(usp)); \ | ||
148 | } while (0) | ||
149 | |||
150 | /* | ||
151 | * Use SET_PERSONALITY to indicate compatibility via TS_COMPAT. | ||
152 | */ | ||
153 | #undef SET_PERSONALITY | ||
154 | #define SET_PERSONALITY(ex) \ | ||
155 | do { \ | ||
156 | current->personality = PER_LINUX; \ | ||
157 | current_thread_info()->status &= ~TS_COMPAT; \ | ||
158 | } while (0) | ||
159 | #define COMPAT_SET_PERSONALITY(ex) \ | ||
160 | do { \ | ||
161 | current->personality = PER_LINUX_32BIT; \ | ||
162 | current_thread_info()->status |= TS_COMPAT; \ | ||
163 | } while (0) | ||
164 | |||
165 | #define COMPAT_ELF_ET_DYN_BASE (0xffffffff / 3 * 2) | ||
166 | |||
167 | #endif /* CONFIG_COMPAT */ | ||
168 | |||
169 | #endif /* _ASM_TILE_ELF_H */ | ||
diff --git a/arch/tile/include/asm/emergency-restart.h b/arch/tile/include/asm/emergency-restart.h new file mode 100644 index 000000000000..3711bd9d50bd --- /dev/null +++ b/arch/tile/include/asm/emergency-restart.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/emergency-restart.h> | |||
diff --git a/arch/tile/include/asm/errno.h b/arch/tile/include/asm/errno.h new file mode 100644 index 000000000000..4c82b503d92f --- /dev/null +++ b/arch/tile/include/asm/errno.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/errno.h> | |||
diff --git a/arch/tile/include/asm/fcntl.h b/arch/tile/include/asm/fcntl.h new file mode 100644 index 000000000000..46ab12db5739 --- /dev/null +++ b/arch/tile/include/asm/fcntl.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/fcntl.h> | |||
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h new file mode 100644 index 000000000000..51537ff9265a --- /dev/null +++ b/arch/tile/include/asm/fixmap.h | |||
@@ -0,0 +1,124 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1998 Ingo Molnar | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_FIXMAP_H | ||
17 | #define _ASM_TILE_FIXMAP_H | ||
18 | |||
19 | #include <asm/page.h> | ||
20 | |||
21 | #ifndef __ASSEMBLY__ | ||
22 | #include <linux/kernel.h> | ||
23 | #ifdef CONFIG_HIGHMEM | ||
24 | #include <linux/threads.h> | ||
25 | #include <asm/kmap_types.h> | ||
26 | #endif | ||
27 | |||
28 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
29 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
30 | |||
31 | /* | ||
32 | * Here we define all the compile-time 'special' virtual | ||
33 | * addresses. The point is to have a constant address at | ||
34 | * compile time, but to set the physical address only | ||
35 | * in the boot process. We allocate these special addresses | ||
36 | * from the end of supervisor virtual memory backwards. | ||
37 | * This also lets us do fail-safe vmalloc(): we | ||
38 | * can guarantee that these special addresses and | ||
39 | * vmalloc()-ed addresses never overlap. | ||
40 | * | ||
41 | * These 'compile-time allocated' memory buffers are | ||
42 | * fixed-size 4k pages (or larger if used with an increment | ||
43 | * higher than 1). Use fixmap_set(idx, phys) to associate | ||
44 | * physical memory with fixmap indices. | ||
45 | * | ||
46 | * TLB entries of such buffers will not be flushed across | ||
47 | * task switches. | ||
48 | * | ||
49 | * We don't bother with a FIX_HOLE since above the fixmaps | ||
50 | * is unmapped memory in any case. | ||
51 | */ | ||
52 | enum fixed_addresses { | ||
53 | #ifdef CONFIG_HIGHMEM | ||
54 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | ||
55 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | ||
56 | #endif | ||
57 | __end_of_permanent_fixed_addresses, | ||
58 | |||
59 | /* | ||
60 | * Temporary boot-time mappings, used before ioremap() is functional. | ||
61 | * Not currently needed by the Tile architecture. | ||
62 | */ | ||
63 | #define NR_FIX_BTMAPS 0 | ||
64 | #if NR_FIX_BTMAPS | ||
65 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses, | ||
66 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, | ||
67 | __end_of_fixed_addresses | ||
68 | #else | ||
69 | __end_of_fixed_addresses = __end_of_permanent_fixed_addresses | ||
70 | #endif | ||
71 | }; | ||
72 | |||
73 | extern void __set_fixmap(enum fixed_addresses idx, | ||
74 | unsigned long phys, pgprot_t flags); | ||
75 | |||
76 | #define set_fixmap(idx, phys) \ | ||
77 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
78 | /* | ||
79 | * Some hardware wants to get fixmapped without caching. | ||
80 | */ | ||
81 | #define set_fixmap_nocache(idx, phys) \ | ||
82 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | ||
83 | |||
84 | #define clear_fixmap(idx) \ | ||
85 | __set_fixmap(idx, 0, __pgprot(0)) | ||
86 | |||
87 | #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) | ||
88 | #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | ||
89 | #define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE) | ||
90 | #define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE) | ||
91 | |||
92 | extern void __this_fixmap_does_not_exist(void); | ||
93 | |||
94 | /* | ||
95 | * 'index to address' translation. If anyone tries to use the idx | ||
96 | * directly without translation, we catch the bug with a NULL-dereference | ||
97 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
98 | */ | ||
99 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
100 | { | ||
101 | /* | ||
102 | * This branch gets completely eliminated after inlining, | ||
103 | * except when someone tries to use fixaddr indices in an | ||
104 | * illegal way (such as mixing up address types or using | ||
105 | * out-of-range indices). | ||
106 | * | ||
107 | * If it doesn't get removed, the linker will complain | ||
108 | * loudly with a reasonably clear error message. | ||
109 | */ | ||
110 | if (idx >= __end_of_fixed_addresses) | ||
111 | __this_fixmap_does_not_exist(); | ||
112 | |||
113 | return __fix_to_virt(idx); | ||
114 | } | ||
115 | |||
116 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
117 | { | ||
118 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
119 | return __virt_to_fix(vaddr); | ||
120 | } | ||
121 | |||
122 | #endif /* !__ASSEMBLY__ */ | ||
123 | |||
124 | #endif /* _ASM_TILE_FIXMAP_H */ | ||
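
fix_to_virt()/virt_to_fix() are a simple affine mapping hanging downward from FIXADDR_TOP: index i corresponds to FIXADDR_TOP - (i << PAGE_SHIFT). A userspace sketch of that arithmetic with made-up constants standing in for FIXADDR_TOP and PAGE_SHIFT (the real values come from <asm/page.h>):

#include <stdio.h>

#define PAGE_SHIFT   12                       /* assumption: 4 KB pages */
#define PAGE_MASK    (~((1UL << PAGE_SHIFT) - 1))
#define FIXADDR_TOP  0xfe000000UL             /* made-up top-of-fixmap address */

#define __fix_to_virt(x)  (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x)  ((FIXADDR_TOP - ((x) & PAGE_MASK)) >> PAGE_SHIFT)

int main(void)
{
        unsigned long addr = __fix_to_virt(3);

        printf("index 3 -> %#lx\n", addr);                         /* top - 3 pages */
        printf("back again -> %lu\n", __virt_to_fix(addr + 123));  /* 3 */
        return 0;
}
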
diff --git a/arch/tile/include/asm/ftrace.h b/arch/tile/include/asm/ftrace.h new file mode 100644 index 000000000000..461459b06d98 --- /dev/null +++ b/arch/tile/include/asm/ftrace.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_FTRACE_H | ||
16 | #define _ASM_TILE_FTRACE_H | ||
17 | |||
18 | /* empty */ | ||
19 | |||
20 | #endif /* _ASM_TILE_FTRACE_H */ | ||
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h new file mode 100644 index 000000000000..9eaeb3c08786 --- /dev/null +++ b/arch/tile/include/asm/futex.h | |||
@@ -0,0 +1,136 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * These routines make two important assumptions: | ||
15 | * | ||
16 | * 1. atomic_t is really an int and can be freely cast back and forth | ||
17 | * (validated in __init_atomic_per_cpu). | ||
18 | * | ||
19 | * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using | ||
20 | * the same locking convention that all the kernel atomic routines use. | ||
21 | */ | ||
22 | |||
23 | #ifndef _ASM_TILE_FUTEX_H | ||
24 | #define _ASM_TILE_FUTEX_H | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | ||
27 | |||
28 | #include <linux/futex.h> | ||
29 | #include <linux/uaccess.h> | ||
30 | #include <linux/errno.h> | ||
31 | |||
32 | extern struct __get_user futex_set(int *v, int i); | ||
33 | extern struct __get_user futex_add(int *v, int n); | ||
34 | extern struct __get_user futex_or(int *v, int n); | ||
35 | extern struct __get_user futex_andn(int *v, int n); | ||
36 | extern struct __get_user futex_cmpxchg(int *v, int o, int n); | ||
37 | |||
38 | #ifndef __tilegx__ | ||
39 | extern struct __get_user futex_xor(int *v, int n); | ||
40 | #else | ||
41 | static inline struct __get_user futex_xor(int __user *uaddr, int n) | ||
42 | { | ||
43 | struct __get_user asm_ret = __get_user_4(uaddr); | ||
44 | if (!asm_ret.err) { | ||
45 | int oldval, newval; | ||
46 | do { | ||
47 | oldval = asm_ret.val; | ||
48 | newval = oldval ^ n; | ||
49 | asm_ret = futex_cmpxchg(uaddr, oldval, newval); | ||
50 | } while (asm_ret.err == 0 && oldval != asm_ret.val); | ||
51 | } | ||
52 | return asm_ret; | ||
53 | } | ||
54 | #endif | ||
55 | |||
56 | static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | ||
57 | { | ||
58 | int op = (encoded_op >> 28) & 7; | ||
59 | int cmp = (encoded_op >> 24) & 15; | ||
60 | int oparg = (encoded_op << 8) >> 20; | ||
61 | int cmparg = (encoded_op << 20) >> 20; | ||
62 | int ret; | ||
63 | struct __get_user asm_ret; | ||
64 | |||
65 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
66 | oparg = 1 << oparg; | ||
67 | |||
68 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
69 | return -EFAULT; | ||
70 | |||
71 | pagefault_disable(); | ||
72 | switch (op) { | ||
73 | case FUTEX_OP_SET: | ||
74 | asm_ret = futex_set(uaddr, oparg); | ||
75 | break; | ||
76 | case FUTEX_OP_ADD: | ||
77 | asm_ret = futex_add(uaddr, oparg); | ||
78 | break; | ||
79 | case FUTEX_OP_OR: | ||
80 | asm_ret = futex_or(uaddr, oparg); | ||
81 | break; | ||
82 | case FUTEX_OP_ANDN: | ||
83 | asm_ret = futex_andn(uaddr, oparg); | ||
84 | break; | ||
85 | case FUTEX_OP_XOR: | ||
86 | asm_ret = futex_xor(uaddr, oparg); | ||
87 | break; | ||
88 | default: | ||
89 | asm_ret.err = -ENOSYS; | ||
90 | } | ||
91 | pagefault_enable(); | ||
92 | |||
93 | ret = asm_ret.err; | ||
94 | |||
95 | if (!ret) { | ||
96 | switch (cmp) { | ||
97 | case FUTEX_OP_CMP_EQ: | ||
98 | ret = (asm_ret.val == cmparg); | ||
99 | break; | ||
100 | case FUTEX_OP_CMP_NE: | ||
101 | ret = (asm_ret.val != cmparg); | ||
102 | break; | ||
103 | case FUTEX_OP_CMP_LT: | ||
104 | ret = (asm_ret.val < cmparg); | ||
105 | break; | ||
106 | case FUTEX_OP_CMP_GE: | ||
107 | ret = (asm_ret.val >= cmparg); | ||
108 | break; | ||
109 | case FUTEX_OP_CMP_LE: | ||
110 | ret = (asm_ret.val <= cmparg); | ||
111 | break; | ||
112 | case FUTEX_OP_CMP_GT: | ||
113 | ret = (asm_ret.val > cmparg); | ||
114 | break; | ||
115 | default: | ||
116 | ret = -ENOSYS; | ||
117 | } | ||
118 | } | ||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, | ||
123 | int newval) | ||
124 | { | ||
125 | struct __get_user asm_ret; | ||
126 | |||
127 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
128 | return -EFAULT; | ||
129 | |||
130 | asm_ret = futex_cmpxchg(uaddr, oldval, newval); | ||
131 | return asm_ret.err ? asm_ret.err : asm_ret.val; | ||
132 | } | ||
133 | |||
134 | #endif /* !__ASSEMBLY__ */ | ||
135 | |||
136 | #endif /* _ASM_TILE_FUTEX_H */ | ||
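
futex_atomic_op_inuser() unpacks one 32-bit encoded_op word: the top four bits hold the operation (bit 31 doubling as the FUTEX_OP_OPARG_SHIFT flag), the next four the comparison, and two 12-bit signed fields hold oparg and cmparg; the shift-left-then-arithmetic-shift-right pairs perform the sign extension. A userspace sketch of just that decoding, with a DEMO_FUTEX_OP() packing macro mirroring the FUTEX_OP() layout from <linux/futex.h>:

#include <stdio.h>

/* Field extraction exactly as in futex_atomic_op_inuser() above. */
static void decode(int encoded_op)
{
        int shift  = (encoded_op >> 31) & 1;        /* FUTEX_OP_OPARG_SHIFT flag */
        int op     = (encoded_op >> 28) & 7;
        int cmp    = (encoded_op >> 24) & 15;
        int oparg  = (encoded_op << 8) >> 20;       /* sign-extend bits 23..12 */
        int cmparg = (encoded_op << 20) >> 20;      /* sign-extend bits 11..0 */

        if (shift)
                oparg = 1 << oparg;
        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
}

/* Same packing as the FUTEX_OP() macro. */
#define DEMO_FUTEX_OP(op, oparg, cmp, cmparg)                     \
        (((op) & 0xf) << 28 | ((cmp) & 0xf) << 24 |               \
         ((oparg) & 0xfff) << 12 | ((cmparg) & 0xfff))

int main(void)
{
        /* FUTEX_OP_ADD (1), oparg 5, FUTEX_OP_CMP_GT (5), cmparg -1 */
        decode(DEMO_FUTEX_OP(1, 5, 5, -1));
        return 0;
}
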
diff --git a/arch/tile/include/asm/hardirq.h b/arch/tile/include/asm/hardirq.h new file mode 100644 index 000000000000..822390f9a154 --- /dev/null +++ b/arch/tile/include/asm/hardirq.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HARDIRQ_H | ||
16 | #define _ASM_TILE_HARDIRQ_H | ||
17 | |||
18 | #include <linux/threads.h> | ||
19 | #include <linux/cache.h> | ||
20 | |||
21 | #include <asm/irq.h> | ||
22 | |||
23 | typedef struct { | ||
24 | unsigned int __softirq_pending; | ||
25 | long idle_timestamp; | ||
26 | |||
27 | /* Hard interrupt statistics. */ | ||
28 | unsigned int irq_timer_count; | ||
29 | unsigned int irq_syscall_count; | ||
30 | unsigned int irq_resched_count; | ||
31 | unsigned int irq_hv_flush_count; | ||
32 | unsigned int irq_call_count; | ||
33 | unsigned int irq_hv_msg_count; | ||
34 | unsigned int irq_dev_intr_count; | ||
35 | |||
36 | } ____cacheline_aligned irq_cpustat_t; | ||
37 | |||
38 | DECLARE_PER_CPU(irq_cpustat_t, irq_stat); | ||
39 | |||
40 | #define __ARCH_IRQ_STAT | ||
41 | #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) | ||
42 | |||
43 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
44 | |||
45 | #define HARDIRQ_BITS 8 | ||
46 | |||
47 | #endif /* _ASM_TILE_HARDIRQ_H */ | ||
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h new file mode 100644 index 000000000000..efdd12e91020 --- /dev/null +++ b/arch/tile/include/asm/highmem.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1999 Gerhard Wichert, Siemens AG | ||
3 | * Gerhard.Wichert@pdb.siemens.de | ||
4 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation, version 2. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * Used in CONFIG_HIGHMEM systems for memory pages which | ||
17 | * are not addressable by direct kernel virtual addresses. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #ifndef _ASM_TILE_HIGHMEM_H | ||
22 | #define _ASM_TILE_HIGHMEM_H | ||
23 | |||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/threads.h> | ||
26 | #include <asm/kmap_types.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | #include <asm/homecache.h> | ||
29 | |||
30 | /* declarations for highmem.c */ | ||
31 | extern unsigned long highstart_pfn, highend_pfn; | ||
32 | |||
33 | extern pte_t *pkmap_page_table; | ||
34 | |||
35 | /* | ||
36 | * Ordering is: | ||
37 | * | ||
38 | * FIXADDR_TOP | ||
39 | * fixed_addresses | ||
40 | * FIXADDR_START | ||
41 | * temp fixed addresses | ||
42 | * FIXADDR_BOOT_START | ||
43 | * Persistent kmap area | ||
44 | * PKMAP_BASE | ||
45 | * VMALLOC_END | ||
46 | * Vmalloc area | ||
47 | * VMALLOC_START | ||
48 | * high_memory | ||
49 | */ | ||
50 | #define LAST_PKMAP_MASK (LAST_PKMAP-1) | ||
51 | #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) | ||
52 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) | ||
53 | |||
54 | void *kmap_high(struct page *page); | ||
55 | void kunmap_high(struct page *page); | ||
56 | void *kmap(struct page *page); | ||
57 | void kunmap(struct page *page); | ||
58 | void *kmap_fix_kpte(struct page *page, int finished); | ||
59 | |||
60 | /* This macro is used only in map_new_virtual() to map "page". */ | ||
61 | #define kmap_prot page_to_kpgprot(page) | ||
62 | |||
63 | void kunmap_atomic(void *kvaddr, enum km_type type); | ||
64 | void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); | ||
65 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); | ||
66 | struct page *kmap_atomic_to_page(void *ptr); | ||
67 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); | ||
68 | void *kmap_atomic(struct page *page, enum km_type type); | ||
69 | void kmap_atomic_fix_kpte(struct page *page, int finished); | ||
70 | |||
71 | #define flush_cache_kmaps() do { } while (0) | ||
72 | |||
73 | #endif /* _ASM_TILE_HIGHMEM_H */ | ||
diff --git a/arch/tile/include/asm/homecache.h b/arch/tile/include/asm/homecache.h new file mode 100644 index 000000000000..a8243865d49e --- /dev/null +++ b/arch/tile/include/asm/homecache.h | |||
@@ -0,0 +1,125 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Handle issues around the Tile "home cache" model of coherence. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_HOMECACHE_H | ||
18 | #define _ASM_TILE_HOMECACHE_H | ||
19 | |||
20 | #include <asm/page.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | |||
23 | struct page; | ||
24 | struct task_struct; | ||
25 | struct vm_area_struct; | ||
26 | struct zone; | ||
27 | |||
28 | /* | ||
29 | * Coherence point for the page is its memory controller. | ||
30 | * It is not present in any cache (L1 or L2). | ||
31 | */ | ||
32 | #define PAGE_HOME_UNCACHED -1 | ||
33 | |||
34 | /* | ||
35 | * Is this page immutable (unwritable) and thus able to be cached more | ||
36 | * widely than would otherwise be possible? On tile64 this means we | ||
37 | * mark the PTE to cache locally; on tilepro it means we have "nc" set. | ||
38 | */ | ||
39 | #define PAGE_HOME_IMMUTABLE -2 | ||
40 | |||
41 | /* | ||
42 | * Each cpu considers its own cache to be the home for the page, | ||
43 | * which makes it incoherent. | ||
44 | */ | ||
45 | #define PAGE_HOME_INCOHERENT -3 | ||
46 | |||
47 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
48 | /* Home for the page is distributed via hash-for-home. */ | ||
49 | #define PAGE_HOME_HASH -4 | ||
50 | #endif | ||
51 | |||
52 | /* Homing is unknown or unspecified. Not valid for page_home(). */ | ||
53 | #define PAGE_HOME_UNKNOWN -5 | ||
54 | |||
55 | /* Home on the current cpu. Not valid for page_home(). */ | ||
56 | #define PAGE_HOME_HERE -6 | ||
57 | |||
58 | /* Support wrapper to use instead of explicit hv_flush_remote(). */ | ||
59 | extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length, | ||
60 | const struct cpumask *cache_cpumask, | ||
61 | HV_VirtAddr tlb_va, unsigned long tlb_length, | ||
62 | unsigned long tlb_pgsize, | ||
63 | const struct cpumask *tlb_cpumask, | ||
64 | HV_Remote_ASID *asids, int asidcount); | ||
65 | |||
66 | /* Set homing-related bits in a PTE (can also pass a pgprot_t). */ | ||
67 | extern pte_t pte_set_home(pte_t pte, int home); | ||
68 | |||
69 | /* Do a cache eviction on the specified cpus. */ | ||
70 | extern void homecache_evict(const struct cpumask *mask); | ||
71 | |||
72 | /* | ||
73 | * Change a kernel page's homecache. It must not be mapped in user space. | ||
74 | * If !CONFIG_HOMECACHE, this is only usable on LOWMEM, may only be called | ||
75 | * when no other cpu can reference the page, and causes a full-chip cache/TLB flush. | ||
76 | */ | ||
77 | extern void homecache_change_page_home(struct page *, int order, int home); | ||
78 | |||
79 | /* | ||
80 | * Flush a page out of whatever cache(s) it is in. | ||
81 | * This is more than just finv, since it properly handles waiting | ||
82 | * for the data to reach memory on tilepro, but it can be quite | ||
83 | * heavyweight, particularly on hash-for-home memory. | ||
84 | */ | ||
85 | extern void homecache_flush_cache(struct page *, int order); | ||
86 | |||
87 | /* | ||
88 | * Allocate a page with the given GFP flags, home, and optionally | ||
89 | * node. These routines are actually just wrappers around the normal | ||
90 | * alloc_pages() / alloc_pages_node() functions, which set and clear | ||
91 | * a per-cpu variable to communicate with homecache_new_kernel_page(). | ||
92 | * If !CONFIG_HOMECACHE, uses homecache_change_page_home(). | ||
93 | */ | ||
94 | extern struct page *homecache_alloc_pages(gfp_t gfp_mask, | ||
95 | unsigned int order, int home); | ||
96 | extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, | ||
97 | unsigned int order, int home); | ||
98 | #define homecache_alloc_page(gfp_mask, home) \ | ||
99 | homecache_alloc_pages(gfp_mask, 0, home) | ||
100 | |||
101 | /* | ||
102 | * These routines are just pass-throughs to free_pages() when | ||
103 | * we support full homecaching. If !CONFIG_HOMECACHE, then these | ||
104 | * routines use homecache_change_page_home() to reset the home | ||
105 | * back to the default before returning the page to the allocator. | ||
106 | */ | ||
107 | void homecache_free_pages(unsigned long addr, unsigned int order); | ||
108 | #define homecache_free_page(page) \ | ||
109 | homecache_free_pages((page), 0) | ||
110 | |||
111 | |||
112 | |||
113 | /* | ||
114 | * Report the page home for LOWMEM pages by examining their kernel PTE, | ||
115 | * or for highmem pages as the default home. | ||
116 | */ | ||
117 | extern int page_home(struct page *); | ||
118 | |||
119 | #define homecache_migrate_kthread() do {} while (0) | ||
120 | |||
121 | #define homecache_kpte_lock() 0 | ||
122 | #define homecache_kpte_unlock(flags) do {} while (0) | ||
123 | |||
124 | |||
125 | #endif /* _ASM_TILE_HOMECACHE_H */ | ||
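As a hedged illustration of the allocation wrappers above (the GFP flags and the choice of home are assumptions, not taken from the patch), a kernel caller might home a scratch page on the local cpu and later hand it back via the address-based free routine:

	struct page *pg = homecache_alloc_page(GFP_KERNEL, smp_processor_id());
	if (pg) {
		void *va = page_address(pg);	/* assumes a LOWMEM page */
		/* ... use the locally-homed page ... */
		homecache_free_pages((unsigned long)va, 0);
	}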
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h new file mode 100644 index 000000000000..0521c277bbde --- /dev/null +++ b/arch/tile/include/asm/hugetlb.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HUGETLB_H | ||
16 | #define _ASM_TILE_HUGETLB_H | ||
17 | |||
18 | #include <asm/page.h> | ||
19 | |||
20 | |||
21 | static inline int is_hugepage_only_range(struct mm_struct *mm, | ||
22 | unsigned long addr, | ||
23 | unsigned long len) { | ||
24 | return 0; | ||
25 | } | ||
26 | |||
27 | /* | ||
28 | * If the arch doesn't supply something else, assume that hugepage | ||
29 | * size aligned regions are ok without further preparation. | ||
30 | */ | ||
31 | static inline int prepare_hugepage_range(struct file *file, | ||
32 | unsigned long addr, unsigned long len) | ||
33 | { | ||
34 | struct hstate *h = hstate_file(file); | ||
35 | if (len & ~huge_page_mask(h)) | ||
36 | return -EINVAL; | ||
37 | if (addr & ~huge_page_mask(h)) | ||
38 | return -EINVAL; | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) | ||
43 | { | ||
44 | } | ||
45 | |||
46 | static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, | ||
47 | unsigned long addr, unsigned long end, | ||
48 | unsigned long floor, | ||
49 | unsigned long ceiling) | ||
50 | { | ||
51 | free_pgd_range(tlb, addr, end, floor, ceiling); | ||
52 | } | ||
53 | |||
54 | static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
55 | pte_t *ptep, pte_t pte) | ||
56 | { | ||
57 | set_pte_order(ptep, pte, HUGETLB_PAGE_ORDER); | ||
58 | } | ||
59 | |||
60 | static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | ||
61 | unsigned long addr, pte_t *ptep) | ||
62 | { | ||
63 | return ptep_get_and_clear(mm, addr, ptep); | ||
64 | } | ||
65 | |||
66 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | ||
67 | unsigned long addr, pte_t *ptep) | ||
68 | { | ||
69 | ptep_clear_flush(vma, addr, ptep); | ||
70 | } | ||
71 | |||
72 | static inline int huge_pte_none(pte_t pte) | ||
73 | { | ||
74 | return pte_none(pte); | ||
75 | } | ||
76 | |||
77 | static inline pte_t huge_pte_wrprotect(pte_t pte) | ||
78 | { | ||
79 | return pte_wrprotect(pte); | ||
80 | } | ||
81 | |||
82 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | ||
83 | unsigned long addr, pte_t *ptep) | ||
84 | { | ||
85 | ptep_set_wrprotect(mm, addr, ptep); | ||
86 | } | ||
87 | |||
88 | static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, | ||
89 | unsigned long addr, pte_t *ptep, | ||
90 | pte_t pte, int dirty) | ||
91 | { | ||
92 | return ptep_set_access_flags(vma, addr, ptep, pte, dirty); | ||
93 | } | ||
94 | |||
95 | static inline pte_t huge_ptep_get(pte_t *ptep) | ||
96 | { | ||
97 | return *ptep; | ||
98 | } | ||
99 | |||
100 | static inline int arch_prepare_hugepage(struct page *page) | ||
101 | { | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static inline void arch_release_hugepage(struct page *page) | ||
106 | { | ||
107 | } | ||
108 | |||
109 | #endif /* _ASM_TILE_HUGETLB_H */ | ||
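A worked instance of the alignment check in prepare_hugepage_range(), assuming a 16 MB huge page size (the real size comes from HPAGE_SIZE via <asm/page.h>, not from this file):

	/* With 16 MB huge pages, huge_page_mask(h) == ~(16UL * 1024 * 1024 - 1),
	 * so the low 24 bits of addr and len must be zero:
	 *   addr = 0x01000000 -> (addr & ~huge_page_mask(h)) == 0      -> accepted
	 *   addr = 0x01000800 -> (addr & ~huge_page_mask(h)) == 0x800  -> -EINVAL
	 */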
diff --git a/arch/tile/include/asm/hv_driver.h b/arch/tile/include/asm/hv_driver.h new file mode 100644 index 000000000000..ad614de899b3 --- /dev/null +++ b/arch/tile/include/asm/hv_driver.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This header defines a wrapper interface for managing hypervisor | ||
15 | * device calls that will result in an interrupt at some later time. | ||
16 | * In particular, this provides wrappers for hv_dev_preada() and | ||
17 | * hv_dev_pwritea(). | ||
18 | */ | ||
19 | |||
20 | #ifndef _ASM_TILE_HV_DRIVER_H | ||
21 | #define _ASM_TILE_HV_DRIVER_H | ||
22 | |||
23 | #include <hv/hypervisor.h> | ||
24 | |||
25 | struct hv_driver_cb; | ||
26 | |||
27 | /* A callback to be invoked when an operation completes. */ | ||
28 | typedef void hv_driver_callback_t(struct hv_driver_cb *cb, __hv32 result); | ||
29 | |||
30 | /* | ||
31 | * A structure to hold information about an outstanding call. | ||
32 | * The driver must allocate a separate structure for each call. | ||
33 | */ | ||
34 | struct hv_driver_cb { | ||
35 | hv_driver_callback_t *callback; /* Function to call on interrupt. */ | ||
36 | void *dev; /* Driver-specific state variable. */ | ||
37 | }; | ||
38 | |||
39 | /* Wrapper for invoking hv_dev_preada(). */ | ||
40 | static inline int | ||
41 | tile_hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
42 | HV_SGL sgl[/* sgl_len */], __hv64 offset, | ||
43 | struct hv_driver_cb *callback) | ||
44 | { | ||
45 | return hv_dev_preada(devhdl, flags, sgl_len, sgl, | ||
46 | offset, (HV_IntArg)callback); | ||
47 | } | ||
48 | |||
49 | /* Wrapper for invoking hv_dev_pwritea(). */ | ||
50 | static inline int | ||
51 | tile_hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
52 | HV_SGL sgl[/* sgl_len */], __hv64 offset, | ||
53 | struct hv_driver_cb *callback) | ||
54 | { | ||
55 | return hv_dev_pwritea(devhdl, flags, sgl_len, sgl, | ||
56 | offset, (HV_IntArg)callback); | ||
57 | } | ||
58 | |||
59 | |||
60 | #endif /* _ASM_TILE_HV_DRIVER_H */ | ||
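One way a driver might use the completion-callback wrappers above; the handler body, the zero flags value, and the single-entry scatter/gather list are illustrative assumptions:

	static void my_done(struct hv_driver_cb *cb, __hv32 result)
	{
		/* cb->dev points back at driver-private state; check result here. */
	}

	static struct hv_driver_cb my_cb = { .callback = my_done };

	static int my_read(int devhdl, HV_SGL *sgl, __hv64 offset)
	{
		my_cb.dev = NULL;	/* no private state in this sketch */
		return tile_hv_dev_preada(devhdl, 0, 1, sgl, offset, &my_cb);
	}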
diff --git a/arch/tile/include/asm/hw_irq.h b/arch/tile/include/asm/hw_irq.h new file mode 100644 index 000000000000..4fac5fbf333e --- /dev/null +++ b/arch/tile/include/asm/hw_irq.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HW_IRQ_H | ||
16 | #define _ASM_TILE_HW_IRQ_H | ||
17 | |||
18 | #endif /* _ASM_TILE_HW_IRQ_H */ | ||
diff --git a/arch/tile/include/asm/ide.h b/arch/tile/include/asm/ide.h new file mode 100644 index 000000000000..3c6f2ed894ce --- /dev/null +++ b/arch/tile/include/asm/ide.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IDE_H | ||
16 | #define _ASM_TILE_IDE_H | ||
17 | |||
18 | /* For IDE on PCI */ | ||
19 | #define MAX_HWIFS 10 | ||
20 | |||
21 | #define ide_default_io_ctl(base) (0) | ||
22 | |||
23 | #include <asm-generic/ide_iops.h> | ||
24 | |||
25 | #endif /* _ASM_TILE_IDE_H */ | ||
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h new file mode 100644 index 000000000000..8c95bef3fa45 --- /dev/null +++ b/arch/tile/include/asm/io.h | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IO_H | ||
16 | #define _ASM_TILE_IO_H | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/bug.h> | ||
20 | #include <asm/page.h> | ||
21 | |||
22 | #define IO_SPACE_LIMIT 0xfffffffful | ||
23 | |||
24 | /* | ||
25 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | ||
26 | * access. | ||
27 | */ | ||
28 | #define xlate_dev_mem_ptr(p) __va(p) | ||
29 | |||
30 | /* | ||
31 | * Convert a virtual cached pointer to an uncached pointer. | ||
32 | */ | ||
33 | #define xlate_dev_kmem_ptr(p) p | ||
34 | |||
35 | /* | ||
36 | * Change "struct page" to physical address. | ||
37 | */ | ||
38 | #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) | ||
39 | |||
40 | /* | ||
41 | * Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to | ||
42 | * long before casting it to a pointer to avoid compiler warnings. | ||
43 | */ | ||
44 | #if CHIP_HAS_MMIO() | ||
45 | extern void __iomem *ioremap(resource_size_t offset, unsigned long size); | ||
46 | extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, | ||
47 | pgprot_t pgprot); | ||
48 | extern void iounmap(volatile void __iomem *addr); | ||
49 | #else | ||
50 | #define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr)) | ||
51 | #define iounmap(addr) ((void)0) | ||
52 | #endif | ||
53 | |||
54 | #define ioremap_nocache(physaddr, size) ioremap(physaddr, size) | ||
55 | #define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) | ||
56 | #define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) | ||
57 | |||
58 | void __iomem *ioport_map(unsigned long port, unsigned int len); | ||
59 | extern inline void ioport_unmap(void __iomem *addr) {} | ||
60 | |||
61 | #define mmiowb() | ||
62 | |||
63 | /* Conversion between virtual and physical mappings. */ | ||
64 | #define mm_ptov(addr) ((void *)phys_to_virt(addr)) | ||
65 | #define mm_vtop(addr) ((unsigned long)virt_to_phys(addr)) | ||
66 | |||
67 | #ifdef CONFIG_PCI | ||
68 | |||
69 | extern u8 _tile_readb(unsigned long addr); | ||
70 | extern u16 _tile_readw(unsigned long addr); | ||
71 | extern u32 _tile_readl(unsigned long addr); | ||
72 | extern u64 _tile_readq(unsigned long addr); | ||
73 | extern void _tile_writeb(u8 val, unsigned long addr); | ||
74 | extern void _tile_writew(u16 val, unsigned long addr); | ||
75 | extern void _tile_writel(u32 val, unsigned long addr); | ||
76 | extern void _tile_writeq(u64 val, unsigned long addr); | ||
77 | |||
78 | #else | ||
79 | |||
80 | /* | ||
81 | * The Tile architecture does not support IOMEM unless PCI is enabled. | ||
82 | * Unfortunately we can't yet simply not declare these methods, | ||
83 | * since some generic code that is compiled into the kernel, but | ||
84 | * never run, uses them unconditionally. | ||
85 | */ | ||
86 | |||
87 | static inline int iomem_panic(void) | ||
88 | { | ||
89 | panic("readb/writeb and friends do not exist on tile without PCI"); | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static inline u8 _tile_readb(unsigned long addr) | ||
94 | { | ||
95 | return iomem_panic(); | ||
96 | } | ||
97 | |||
98 | static inline u16 _tile_readw(unsigned long addr) | ||
99 | { | ||
100 | return iomem_panic(); | ||
101 | } | ||
102 | |||
103 | static inline u32 _tile_readl(unsigned long addr) | ||
104 | { | ||
105 | return iomem_panic(); | ||
106 | } | ||
107 | |||
108 | static inline u64 _tile_readq(unsigned long addr) | ||
109 | { | ||
110 | return iomem_panic(); | ||
111 | } | ||
112 | |||
113 | static inline void _tile_writeb(u8 val, unsigned long addr) | ||
114 | { | ||
115 | iomem_panic(); | ||
116 | } | ||
117 | |||
118 | static inline void _tile_writew(u16 val, unsigned long addr) | ||
119 | { | ||
120 | iomem_panic(); | ||
121 | } | ||
122 | |||
123 | static inline void _tile_writel(u32 val, unsigned long addr) | ||
124 | { | ||
125 | iomem_panic(); | ||
126 | } | ||
127 | |||
128 | static inline void _tile_writeq(u64 val, unsigned long addr) | ||
129 | { | ||
130 | iomem_panic(); | ||
131 | } | ||
132 | |||
133 | #endif | ||
134 | |||
135 | #define readb(addr) _tile_readb((unsigned long)addr) | ||
136 | #define readw(addr) _tile_readw((unsigned long)addr) | ||
137 | #define readl(addr) _tile_readl((unsigned long)addr) | ||
138 | #define readq(addr) _tile_readq((unsigned long)addr) | ||
139 | #define writeb(val, addr) _tile_writeb(val, (unsigned long)addr) | ||
140 | #define writew(val, addr) _tile_writew(val, (unsigned long)addr) | ||
141 | #define writel(val, addr) _tile_writel(val, (unsigned long)addr) | ||
142 | #define writeq(val, addr) _tile_writeq(val, (unsigned long)addr) | ||
143 | |||
144 | #define __raw_readb readb | ||
145 | #define __raw_readw readw | ||
146 | #define __raw_readl readl | ||
147 | #define __raw_readq readq | ||
148 | #define __raw_writeb writeb | ||
149 | #define __raw_writew writew | ||
150 | #define __raw_writel writel | ||
151 | #define __raw_writeq writeq | ||
152 | |||
153 | #define readb_relaxed readb | ||
154 | #define readw_relaxed readw | ||
155 | #define readl_relaxed readl | ||
156 | #define readq_relaxed readq | ||
157 | |||
158 | #define ioread8 readb | ||
159 | #define ioread16 readw | ||
160 | #define ioread32 readl | ||
161 | #define ioread64 readq | ||
162 | #define iowrite8 writeb | ||
163 | #define iowrite16 writew | ||
164 | #define iowrite32 writel | ||
165 | #define iowrite64 writeq | ||
166 | |||
167 | static inline void *memcpy_fromio(void *dst, void *src, int len) | ||
168 | { | ||
169 | int x; | ||
170 | BUG_ON((unsigned long)src & 0x3); | ||
171 | for (x = 0; x < len; x += 4) | ||
172 | *(u32 *)(dst + x) = readl(src + x); | ||
173 | return dst; | ||
174 | } | ||
175 | |||
176 | static inline void *memcpy_toio(void *dst, void *src, int len) | ||
177 | { | ||
178 | int x; | ||
179 | BUG_ON((unsigned long)dst & 0x3); | ||
180 | for (x = 0; x < len; x += 4) | ||
181 | writel(*(u32 *)(src + x), dst + x); | ||
182 | return dst; | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | * The Tile architecture does not support IOPORT, even with PCI. | ||
187 | * Unfortunately we can't yet simply not declare these methods, | ||
188 | * since some generic code that compiles into the kernel, but | ||
189 | * we never run, uses them unconditionally. | ||
190 | */ | ||
191 | |||
192 | static inline int ioport_panic(void) | ||
193 | { | ||
194 | panic("inb/outb and friends do not exist on tile"); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static inline u8 inb(unsigned long addr) | ||
199 | { | ||
200 | return ioport_panic(); | ||
201 | } | ||
202 | |||
203 | static inline u16 inw(unsigned long addr) | ||
204 | { | ||
205 | return ioport_panic(); | ||
206 | } | ||
207 | |||
208 | static inline u32 inl(unsigned long addr) | ||
209 | { | ||
210 | return ioport_panic(); | ||
211 | } | ||
212 | |||
213 | static inline void outb(u8 b, unsigned long addr) | ||
214 | { | ||
215 | ioport_panic(); | ||
216 | } | ||
217 | |||
218 | static inline void outw(u16 b, unsigned long addr) | ||
219 | { | ||
220 | ioport_panic(); | ||
221 | } | ||
222 | |||
223 | static inline void outl(u32 b, unsigned long addr) | ||
224 | { | ||
225 | ioport_panic(); | ||
226 | } | ||
227 | |||
228 | #define inb_p(addr) inb(addr) | ||
229 | #define inw_p(addr) inw(addr) | ||
230 | #define inl_p(addr) inl(addr) | ||
231 | #define outb_p(x, addr) outb((x), (addr)) | ||
232 | #define outw_p(x, addr) outw((x), (addr)) | ||
233 | #define outl_p(x, addr) outl((x), (addr)) | ||
234 | |||
235 | static inline void insb(unsigned long addr, void *buffer, int count) | ||
236 | { | ||
237 | ioport_panic(); | ||
238 | } | ||
239 | |||
240 | static inline void insw(unsigned long addr, void *buffer, int count) | ||
241 | { | ||
242 | ioport_panic(); | ||
243 | } | ||
244 | |||
245 | static inline void insl(unsigned long addr, void *buffer, int count) | ||
246 | { | ||
247 | ioport_panic(); | ||
248 | } | ||
249 | |||
250 | static inline void outsb(unsigned long addr, const void *buffer, int count) | ||
251 | { | ||
252 | ioport_panic(); | ||
253 | } | ||
254 | |||
255 | static inline void outsw(unsigned long addr, const void *buffer, int count) | ||
256 | { | ||
257 | ioport_panic(); | ||
258 | } | ||
259 | |||
260 | static inline void outsl(unsigned long addr, const void *buffer, int count) | ||
261 | { | ||
262 | ioport_panic(); | ||
263 | } | ||
264 | |||
265 | #define ioread8_rep(p, dst, count) \ | ||
266 | insb((unsigned long) (p), (dst), (count)) | ||
267 | #define ioread16_rep(p, dst, count) \ | ||
268 | insw((unsigned long) (p), (dst), (count)) | ||
269 | #define ioread32_rep(p, dst, count) \ | ||
270 | insl((unsigned long) (p), (dst), (count)) | ||
271 | |||
272 | #define iowrite8_rep(p, src, count) \ | ||
273 | outsb((unsigned long) (p), (src), (count)) | ||
274 | #define iowrite16_rep(p, src, count) \ | ||
275 | outsw((unsigned long) (p), (src), (count)) | ||
276 | #define iowrite32_rep(p, src, count) \ | ||
277 | outsl((unsigned long) (p), (src), (count)) | ||
278 | |||
279 | #endif /* _ASM_TILE_IO_H */ | ||
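With CONFIG_PCI and CHIP_HAS_MMIO(), the ioremap() and readl()/writel() definitions above combine in the usual way; a sketch only (the register offset and bit are made up):

	static void poke_device(resource_size_t bar_phys, unsigned long bar_len)
	{
		void __iomem *regs = ioremap(bar_phys, bar_len);
		if (regs) {
			u32 status = readl(regs + 0x10);	/* illustrative offset */
			writel(status | 1, regs + 0x10);
			iounmap(regs);
		}
	}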
diff --git a/arch/tile/include/asm/ioctl.h b/arch/tile/include/asm/ioctl.h new file mode 100644 index 000000000000..b279fe06dfe5 --- /dev/null +++ b/arch/tile/include/asm/ioctl.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ioctl.h> | |||
diff --git a/arch/tile/include/asm/ioctls.h b/arch/tile/include/asm/ioctls.h new file mode 100644 index 000000000000..ec34c760665e --- /dev/null +++ b/arch/tile/include/asm/ioctls.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ioctls.h> | |||
diff --git a/arch/tile/include/asm/ipc.h b/arch/tile/include/asm/ipc.h new file mode 100644 index 000000000000..a46e3d9c2a3f --- /dev/null +++ b/arch/tile/include/asm/ipc.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ipc.h> | |||
diff --git a/arch/tile/include/asm/ipcbuf.h b/arch/tile/include/asm/ipcbuf.h new file mode 100644 index 000000000000..84c7e51cb6d0 --- /dev/null +++ b/arch/tile/include/asm/ipcbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ipcbuf.h> | |||
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h new file mode 100644 index 000000000000..9be1f849fac9 --- /dev/null +++ b/arch/tile/include/asm/irq.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IRQ_H | ||
16 | #define _ASM_TILE_IRQ_H | ||
17 | |||
18 | #include <linux/hardirq.h> | ||
19 | |||
20 | /* The hypervisor interface provides 32 IRQs. */ | ||
21 | #define NR_IRQS 32 | ||
22 | |||
23 | /* IRQ numbers used for linux IPIs. */ | ||
24 | #define IRQ_RESCHEDULE 1 | ||
25 | |||
26 | /* The HV interrupt state object. */ | ||
27 | DECLARE_PER_CPU(HV_IntrState, dev_intr_state); | ||
28 | |||
29 | void ack_bad_irq(unsigned int irq); | ||
30 | |||
31 | /* | ||
32 | * Paravirtualized drivers should call this when their init calls | ||
33 | * discover a valid HV IRQ. | ||
34 | */ | ||
35 | void tile_irq_activate(unsigned int irq); | ||
36 | |||
37 | #endif /* _ASM_TILE_IRQ_H */ | ||
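A sketch of the paravirtualized-driver pattern described above: once init code discovers a valid HV IRQ, it registers a handler and then activates the IRQ. The handler body and device name are assumptions, and <linux/interrupt.h> is assumed to be included:

	static irqreturn_t my_intr(int irq, void *dev_id)
	{
		/* acknowledge and service the device */
		return IRQ_HANDLED;
	}

	static int my_init(unsigned int hv_irq)
	{
		int rc = request_irq(hv_irq, my_intr, 0, "mydev", NULL);
		if (rc == 0)
			tile_irq_activate(hv_irq);
		return rc;
	}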
diff --git a/arch/tile/include/asm/irq_regs.h b/arch/tile/include/asm/irq_regs.h new file mode 100644 index 000000000000..3dd9c0b70270 --- /dev/null +++ b/arch/tile/include/asm/irq_regs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/irq_regs.h> | |||
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h new file mode 100644 index 000000000000..cf5bffd00fef --- /dev/null +++ b/arch/tile/include/asm/irqflags.h | |||
@@ -0,0 +1,267 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IRQFLAGS_H | ||
16 | #define _ASM_TILE_IRQFLAGS_H | ||
17 | |||
18 | #include <asm/processor.h> | ||
19 | #include <arch/interrupts.h> | ||
20 | #include <arch/chip.h> | ||
21 | |||
22 | /* | ||
23 | * The set of interrupts we want to allow when interrupts are nominally | ||
24 | * disabled. The remainder are effectively "NMI" interrupts from | ||
25 | * the point of view of the generic Linux code. Note that synchronous | ||
26 | * interrupts (aka "non-queued") are not blocked by the mask in any case. | ||
27 | */ | ||
28 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
29 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
30 | (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) | ||
31 | #else | ||
32 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
33 | (~(INT_MASK(INT_PERF_COUNT))) | ||
34 | #endif | ||
35 | |||
36 | #ifndef __ASSEMBLY__ | ||
37 | |||
38 | /* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */ | ||
39 | #include <asm/percpu.h> | ||
40 | #include <arch/spr_def.h> | ||
41 | |||
42 | /* Set and clear kernel interrupt masks. */ | ||
43 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
44 | #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 | ||
45 | # error Fix assumptions about which word various interrupts are in | ||
46 | #endif | ||
47 | #define interrupt_mask_set(n) do { \ | ||
48 | int __n = (n); \ | ||
49 | int __mask = 1 << (__n & 0x1f); \ | ||
50 | if (__n < 32) \ | ||
51 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \ | ||
52 | else \ | ||
53 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \ | ||
54 | } while (0) | ||
55 | #define interrupt_mask_reset(n) do { \ | ||
56 | int __n = (n); \ | ||
57 | int __mask = 1 << (__n & 0x1f); \ | ||
58 | if (__n < 32) \ | ||
59 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \ | ||
60 | else \ | ||
61 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \ | ||
62 | } while (0) | ||
63 | #define interrupt_mask_check(n) ({ \ | ||
64 | int __n = (n); \ | ||
65 | (((__n < 32) ? \ | ||
66 | __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \ | ||
67 | __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \ | ||
68 | >> (__n & 0x1f)) & 1; \ | ||
69 | }) | ||
70 | #define interrupt_mask_set_mask(mask) do { \ | ||
71 | unsigned long long __m = (mask); \ | ||
72 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \ | ||
73 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \ | ||
74 | } while (0) | ||
75 | #define interrupt_mask_reset_mask(mask) do { \ | ||
76 | unsigned long long __m = (mask); \ | ||
77 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \ | ||
78 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \ | ||
79 | } while (0) | ||
80 | #else | ||
81 | #define interrupt_mask_set(n) \ | ||
82 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n))) | ||
83 | #define interrupt_mask_reset(n) \ | ||
84 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n))) | ||
85 | #define interrupt_mask_check(n) \ | ||
86 | ((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1) | ||
87 | #define interrupt_mask_set_mask(mask) \ | ||
88 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask)) | ||
89 | #define interrupt_mask_reset_mask(mask) \ | ||
90 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask)) | ||
91 | #endif | ||
92 | |||
93 | /* | ||
94 | * The set of interrupts we want active if irqs are enabled. | ||
95 | * Note that in particular, the tile timer interrupt comes and goes | ||
96 | * from this set, since we have no other way to turn off the timer. | ||
97 | * Likewise, INTCTRL_1 is removed and re-added during device | ||
98 | * interrupts, as is the hardwall UDN_FIREWALL interrupt. | ||
99 | * We use a low bit (MEM_ERROR) as our sentinel value and make sure it | ||
100 | * is always claimed as an "active interrupt" so we can query that bit | ||
101 | * to know our current state. | ||
102 | */ | ||
103 | DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | ||
104 | #define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) | ||
105 | |||
106 | /* Disable interrupts. */ | ||
107 | #define raw_local_irq_disable() \ | ||
108 | interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS) | ||
109 | |||
110 | /* Disable all interrupts, including NMIs. */ | ||
111 | #define raw_local_irq_disable_all() \ | ||
112 | interrupt_mask_set_mask(-1UL) | ||
113 | |||
114 | /* Re-enable all maskable interrupts. */ | ||
115 | #define raw_local_irq_enable() \ | ||
116 | interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask)) | ||
117 | |||
118 | /* Disable or enable interrupts based on flag argument. */ | ||
119 | #define raw_local_irq_restore(disabled) do { \ | ||
120 | if (disabled) \ | ||
121 | raw_local_irq_disable(); \ | ||
122 | else \ | ||
123 | raw_local_irq_enable(); \ | ||
124 | } while (0) | ||
125 | |||
126 | /* Return true if "flags" argument means interrupts are disabled. */ | ||
127 | #define raw_irqs_disabled_flags(flags) ((flags) != 0) | ||
128 | |||
129 | /* Return true if interrupts are currently disabled. */ | ||
130 | #define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) | ||
131 | |||
132 | /* Save whether interrupts are currently disabled. */ | ||
133 | #define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled()) | ||
134 | |||
135 | /* Save whether interrupts are currently disabled, then disable them. */ | ||
136 | #define raw_local_irq_save(flags) \ | ||
137 | do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0) | ||
138 | |||
139 | /* Prevent the given interrupt from being enabled next time we enable irqs. */ | ||
140 | #define raw_local_irq_mask(interrupt) \ | ||
141 | (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) | ||
142 | |||
143 | /* Prevent the given interrupt from being enabled immediately. */ | ||
144 | #define raw_local_irq_mask_now(interrupt) do { \ | ||
145 | raw_local_irq_mask(interrupt); \ | ||
146 | interrupt_mask_set(interrupt); \ | ||
147 | } while (0) | ||
148 | |||
149 | /* Allow the given interrupt to be enabled next time we enable irqs. */ | ||
150 | #define raw_local_irq_unmask(interrupt) \ | ||
151 | (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) | ||
152 | |||
153 | /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ | ||
154 | #define raw_local_irq_unmask_now(interrupt) do { \ | ||
155 | raw_local_irq_unmask(interrupt); \ | ||
156 | if (!irqs_disabled()) \ | ||
157 | interrupt_mask_reset(interrupt); \ | ||
158 | } while (0) | ||
159 | |||
160 | #else /* __ASSEMBLY__ */ | ||
161 | |||
162 | /* We provide a somewhat more restricted set for assembly. */ | ||
163 | |||
164 | #ifdef __tilegx__ | ||
165 | |||
166 | #if INT_MEM_ERROR != 0 | ||
167 | # error Fix IRQ_DISABLED() macro | ||
168 | #endif | ||
169 | |||
170 | /* Return 0 or 1 to indicate whether interrupts are currently disabled. */ | ||
171 | #define IRQS_DISABLED(tmp) \ | ||
172 | mfspr tmp, INTERRUPT_MASK_1; \ | ||
173 | andi tmp, tmp, 1 | ||
174 | |||
175 | /* Load up a pointer to &interrupts_enabled_mask. */ | ||
176 | #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \ | ||
177 | moveli reg, hw2_last(interrupts_enabled_mask); \ | ||
178 | shl16insli reg, reg, hw1(interrupts_enabled_mask); \ | ||
179 | shl16insli reg, reg, hw0(interrupts_enabled_mask); \ | ||
180 | add reg, reg, tp | ||
181 | |||
182 | /* Disable interrupts. */ | ||
183 | #define IRQ_DISABLE(tmp0, tmp1) \ | ||
184 | moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \ | ||
185 | shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \ | ||
186 | shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \ | ||
187 | mtspr INTERRUPT_MASK_SET_1, tmp0 | ||
188 | |||
189 | /* Disable ALL synchronous interrupts (used by NMI entry). */ | ||
190 | #define IRQ_DISABLE_ALL(tmp) \ | ||
191 | movei tmp, -1; \ | ||
192 | mtspr INTERRUPT_MASK_SET_1, tmp | ||
193 | |||
194 | /* Enable interrupts. */ | ||
195 | #define IRQ_ENABLE(tmp0, tmp1) \ | ||
196 | GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ | ||
197 | ld tmp0, tmp0; \ | ||
198 | mtspr INTERRUPT_MASK_RESET_1, tmp0 | ||
199 | |||
200 | #else /* !__tilegx__ */ | ||
201 | |||
202 | /* | ||
203 | * Return 0 or 1 to indicate whether interrupts are currently disabled. | ||
204 | * Note that it's important that we use a bit from the "low" mask word, | ||
205 | * since when we are enabling, that is the word we write first, so if we | ||
206 | * are interrupted after only writing half of the mask, the interrupt | ||
207 | * handler will correctly observe that we have interrupts enabled, and | ||
208 | * will enable interrupts itself on return from the interrupt handler | ||
209 | * (making the original code's write of the "high" mask word idempotent). | ||
210 | */ | ||
211 | #define IRQS_DISABLED(tmp) \ | ||
212 | mfspr tmp, INTERRUPT_MASK_1_0; \ | ||
213 | shri tmp, tmp, INT_MEM_ERROR; \ | ||
214 | andi tmp, tmp, 1 | ||
215 | |||
216 | /* Load up a pointer to &interrupts_enabled_mask. */ | ||
217 | #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \ | ||
218 | moveli reg, lo16(interrupts_enabled_mask); \ | ||
219 | auli reg, reg, ha16(interrupts_enabled_mask);\ | ||
220 | add reg, reg, tp | ||
221 | |||
222 | /* Disable interrupts. */ | ||
223 | #define IRQ_DISABLE(tmp0, tmp1) \ | ||
224 | { \ | ||
225 | movei tmp0, -1; \ | ||
226 | moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \ | ||
227 | }; \ | ||
228 | { \ | ||
229 | mtspr INTERRUPT_MASK_SET_1_0, tmp0; \ | ||
230 | auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \ | ||
231 | }; \ | ||
232 | mtspr INTERRUPT_MASK_SET_1_1, tmp1 | ||
233 | |||
234 | /* Disable ALL synchronous interrupts (used by NMI entry). */ | ||
235 | #define IRQ_DISABLE_ALL(tmp) \ | ||
236 | movei tmp, -1; \ | ||
237 | mtspr INTERRUPT_MASK_SET_1_0, tmp; \ | ||
238 | mtspr INTERRUPT_MASK_SET_1_1, tmp | ||
239 | |||
240 | /* Enable interrupts. */ | ||
241 | #define IRQ_ENABLE(tmp0, tmp1) \ | ||
242 | GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ | ||
243 | { \ | ||
244 | lw tmp0, tmp0; \ | ||
245 | addi tmp1, tmp0, 4 \ | ||
246 | }; \ | ||
247 | lw tmp1, tmp1; \ | ||
248 | mtspr INTERRUPT_MASK_RESET_1_0, tmp0; \ | ||
249 | mtspr INTERRUPT_MASK_RESET_1_1, tmp1 | ||
250 | #endif | ||
251 | |||
252 | /* | ||
253 | * Do the CPU's IRQ-state tracing from assembly code. We call a | ||
254 | * C function, but almost everywhere we do, we don't mind clobbering | ||
255 | * all the caller-saved registers. | ||
256 | */ | ||
257 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
258 | # define TRACE_IRQS_ON jal trace_hardirqs_on | ||
259 | # define TRACE_IRQS_OFF jal trace_hardirqs_off | ||
260 | #else | ||
261 | # define TRACE_IRQS_ON | ||
262 | # define TRACE_IRQS_OFF | ||
263 | #endif | ||
264 | |||
265 | #endif /* __ASSEMBLY__ */ | ||
266 | |||
267 | #endif /* _ASM_TILE_IRQFLAGS_H */ | ||
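Because the saved "flags" value here is just a 0/1 "interrupts were disabled" indication rather than a full mask, the usual save/restore nesting pattern still works; a minimal sketch:

	unsigned long flags;

	raw_local_irq_save(flags);	/* flags == 1 if interrupts were already disabled */
	/* ... short critical section ... */
	raw_local_irq_restore(flags);	/* re-enables only if they were enabled on entry */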
diff --git a/arch/tile/include/asm/kdebug.h b/arch/tile/include/asm/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/arch/tile/include/asm/kdebug.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/kdebug.h> | |||
diff --git a/arch/tile/include/asm/kexec.h b/arch/tile/include/asm/kexec.h new file mode 100644 index 000000000000..c11a6cc73bb8 --- /dev/null +++ b/arch/tile/include/asm/kexec.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * based on kexec.h from other architectures in linux-2.6.18 | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_KEXEC_H | ||
18 | #define _ASM_TILE_KEXEC_H | ||
19 | |||
20 | #include <asm/page.h> | ||
21 | |||
22 | /* Maximum physical address we can use pages from. */ | ||
23 | #define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE | ||
24 | /* Maximum address we can reach in physical address mode. */ | ||
25 | #define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE | ||
26 | /* Maximum address we can use for the control code buffer. */ | ||
27 | #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE | ||
28 | |||
29 | #define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE | ||
30 | |||
31 | /* | ||
32 | * We don't bother to provide a unique identifier, since we can only | ||
33 | * reboot with a single type of kernel image anyway. | ||
34 | */ | ||
35 | #define KEXEC_ARCH KEXEC_ARCH_DEFAULT | ||
36 | |||
37 | /* Use the tile override for the page allocator. */ | ||
38 | struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order); | ||
39 | #define kimage_alloc_pages_arch kimage_alloc_pages_arch | ||
40 | |||
41 | #define MAX_NOTE_BYTES 1024 | ||
42 | |||
43 | /* Defined in arch/tile/kernel/relocate_kernel.S */ | ||
44 | extern const unsigned char relocate_new_kernel[]; | ||
45 | extern const unsigned long relocate_new_kernel_size; | ||
46 | extern void relocate_new_kernel_end(void); | ||
47 | |||
48 | /* Provide a dummy definition to avoid build failures. */ | ||
49 | static inline void crash_setup_regs(struct pt_regs *n, struct pt_regs *o) | ||
50 | { | ||
51 | } | ||
52 | |||
53 | #endif /* _ASM_TILE_KEXEC_H */ | ||
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h new file mode 100644 index 000000000000..1480106d1c05 --- /dev/null +++ b/arch/tile/include/asm/kmap_types.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_KMAP_TYPES_H | ||
16 | #define _ASM_TILE_KMAP_TYPES_H | ||
17 | |||
18 | /* | ||
19 | * In TILE Linux each set of four of these uses another 16MB chunk of | ||
20 | * address space, given 64 tiles and 64KB pages, so we only enable | ||
21 | * ones that are required by the kernel configuration. | ||
22 | */ | ||
23 | enum km_type { | ||
24 | KM_BOUNCE_READ, | ||
25 | KM_SKB_SUNRPC_DATA, | ||
26 | KM_SKB_DATA_SOFTIRQ, | ||
27 | KM_USER0, | ||
28 | KM_USER1, | ||
29 | KM_BIO_SRC_IRQ, | ||
30 | KM_IRQ0, | ||
31 | KM_IRQ1, | ||
32 | KM_SOFTIRQ0, | ||
33 | KM_SOFTIRQ1, | ||
34 | KM_MEMCPY0, | ||
35 | KM_MEMCPY1, | ||
36 | #if defined(CONFIG_HIGHPTE) | ||
37 | KM_PTE0, | ||
38 | KM_PTE1, | ||
39 | #endif | ||
40 | KM_TYPE_NR | ||
41 | }; | ||
42 | |||
43 | #endif /* _ASM_TILE_KMAP_TYPES_H */ | ||
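The 16MB figure in the comment above is just the per-slot fixmap cost multiplied out; under the stated assumptions of 64 tiles and 64KB pages: 4 slots per cpu x 64 cpus x 64KB per slot = 16MB of kmap_atomic address space for each group of four km_type entries.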
diff --git a/arch/tile/include/asm/linkage.h b/arch/tile/include/asm/linkage.h new file mode 100644 index 000000000000..e121c39751a7 --- /dev/null +++ b/arch/tile/include/asm/linkage.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_LINKAGE_H | ||
16 | #define _ASM_TILE_LINKAGE_H | ||
17 | |||
18 | #include <feedback.h> | ||
19 | |||
20 | #define __ALIGN .align 8 | ||
21 | |||
22 | /* | ||
23 | * The STD_ENTRY and STD_ENDPROC macros put the function in a | ||
24 | * self-named .text.foo section, and if linker feedback collection | ||
25 | * is enabled, add a suitable call to the feedback collection code. | ||
26 | * STD_ENTRY_SECTION lets you specify a non-standard section name. | ||
27 | */ | ||
28 | |||
29 | #define STD_ENTRY(name) \ | ||
30 | .pushsection .text.##name, "ax"; \ | ||
31 | ENTRY(name); \ | ||
32 | FEEDBACK_ENTER(name) | ||
33 | |||
34 | #define STD_ENTRY_SECTION(name, section) \ | ||
35 | .pushsection section, "ax"; \ | ||
36 | ENTRY(name); \ | ||
37 | FEEDBACK_ENTER_EXPLICIT(name, section, .Lend_##name - name) | ||
38 | |||
39 | #define STD_ENDPROC(name) \ | ||
40 | ENDPROC(name); \ | ||
41 | .Lend_##name:; \ | ||
42 | .popsection | ||
43 | |||
44 | /* Create a file-static function entry set up for feedback gathering. */ | ||
45 | #define STD_ENTRY_LOCAL(name) \ | ||
46 | .pushsection .text.##name, "ax"; \ | ||
47 | ALIGN; \ | ||
48 | name:; \ | ||
49 | FEEDBACK_ENTER(name) | ||
50 | |||
51 | #endif /* _ASM_TILE_LINKAGE_H */ | ||
diff --git a/arch/tile/include/asm/local.h b/arch/tile/include/asm/local.h new file mode 100644 index 000000000000..c11c530f74d0 --- /dev/null +++ b/arch/tile/include/asm/local.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/local.h> | |||
diff --git a/arch/tile/include/asm/memprof.h b/arch/tile/include/asm/memprof.h new file mode 100644 index 000000000000..359949be28c1 --- /dev/null +++ b/arch/tile/include/asm/memprof.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * The hypervisor's memory controller profiling infrastructure allows | ||
15 | * the programmer to find out what fraction of the available memory | ||
16 | * bandwidth is being consumed at each memory controller. The | ||
17 | * profiler provides start, stop, and clear operations to allow | ||
18 | * profiling over a specific time window, as well as an interface for | ||
19 | * reading the most recent profile values. | ||
20 | * | ||
21 | * This header declares IOCTL codes necessary to control memprof. | ||
22 | */ | ||
23 | #ifndef _ASM_TILE_MEMPROF_H | ||
24 | #define _ASM_TILE_MEMPROF_H | ||
25 | |||
26 | #include <linux/ioctl.h> | ||
27 | |||
28 | #define MEMPROF_IOCTL_TYPE 0xB4 | ||
29 | #define MEMPROF_IOCTL_START _IO(MEMPROF_IOCTL_TYPE, 0) | ||
30 | #define MEMPROF_IOCTL_STOP _IO(MEMPROF_IOCTL_TYPE, 1) | ||
31 | #define MEMPROF_IOCTL_CLEAR _IO(MEMPROF_IOCTL_TYPE, 2) | ||
32 | |||
33 | #endif /* _ASM_TILE_MEMPROF_H */ | ||
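A user-space sketch of driving these ioctls around a workload. Only the ioctl numbers come from this header; the /dev/memprof device path is a hypothetical placeholder, and <fcntl.h>, <sys/ioctl.h>, and <unistd.h> are assumed:

	int fd = open("/dev/memprof", O_RDWR);	/* device path is hypothetical */
	if (fd >= 0) {
		ioctl(fd, MEMPROF_IOCTL_CLEAR);
		ioctl(fd, MEMPROF_IOCTL_START);
		/* ... run the workload being measured ... */
		ioctl(fd, MEMPROF_IOCTL_STOP);
		close(fd);
	}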
diff --git a/arch/tile/include/asm/mman.h b/arch/tile/include/asm/mman.h new file mode 100644 index 000000000000..4c6811e3e8dc --- /dev/null +++ b/arch/tile/include/asm/mman.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMAN_H | ||
16 | #define _ASM_TILE_MMAN_H | ||
17 | |||
18 | #include <asm-generic/mman-common.h> | ||
19 | #include <arch/chip.h> | ||
20 | |||
21 | /* Standard Linux flags */ | ||
22 | |||
23 | #define MAP_POPULATE 0x0040 /* populate (prefault) pagetables */ | ||
24 | #define MAP_NONBLOCK 0x0080 /* do not block on IO */ | ||
25 | #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ | ||
26 | #define MAP_LOCKED 0x0200 /* pages are locked */ | ||
27 | #define MAP_NORESERVE 0x0400 /* don't check for reservations */ | ||
28 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | ||
29 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | ||
30 | #define MAP_HUGETLB 0x4000 /* create a huge page mapping */ | ||
31 | |||
32 | |||
33 | /* | ||
34 | * Flags for mlockall | ||
35 | */ | ||
36 | #define MCL_CURRENT 1 /* lock all current mappings */ | ||
37 | #define MCL_FUTURE 2 /* lock all future mappings */ | ||
38 | |||
39 | |||
40 | #endif /* _ASM_TILE_MMAN_H */ | ||
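A user-space sketch of the MAP_HUGETLB flag defined above; the 16 MB length is an assumed huge page size, and <sys/mman.h> supplies mmap()/munmap():

	size_t len = 16UL << 20;	/* assumed huge page size */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p != MAP_FAILED)
		munmap(p, len);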
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h new file mode 100644 index 000000000000..92f94c77b6e4 --- /dev/null +++ b/arch/tile/include/asm/mmu.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMU_H | ||
16 | #define _ASM_TILE_MMU_H | ||
17 | |||
18 | /* Capture any arch- and mm-specific information. */ | ||
19 | struct mm_context { | ||
20 | /* | ||
21 | * Written under the mmap_sem semaphore; read atomically without | ||
22 | * the semaphore, since the value is set conservatively. | ||
23 | */ | ||
24 | unsigned int priority_cached; | ||
25 | }; | ||
26 | |||
27 | typedef struct mm_context mm_context_t; | ||
28 | |||
29 | void leave_mm(int cpu); | ||
30 | |||
31 | #endif /* _ASM_TILE_MMU_H */ | ||
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h new file mode 100644 index 000000000000..9bc0d0725c28 --- /dev/null +++ b/arch/tile/include/asm/mmu_context.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMU_CONTEXT_H | ||
16 | #define _ASM_TILE_MMU_CONTEXT_H | ||
17 | |||
18 | #include <linux/smp.h> | ||
19 | #include <asm/setup.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/pgtable.h> | ||
23 | #include <asm/tlbflush.h> | ||
24 | #include <asm/homecache.h> | ||
25 | #include <asm-generic/mm_hooks.h> | ||
26 | |||
27 | static inline int | ||
28 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
29 | { | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | /* Note that arch/tile/kernel/head.S also calls hv_install_context() */ | ||
34 | static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot) | ||
35 | { | ||
36 | /* FIXME: DIRECTIO should not always be set. */ | ||
37 | int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO); | ||
38 | if (rc < 0) | ||
39 | panic("hv_install_context failed: %d", rc); | ||
40 | } | ||
41 | |||
42 | static inline void install_page_table(pgd_t *pgdir, int asid) | ||
43 | { | ||
44 | pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir); | ||
45 | __install_page_table(pgdir, asid, *ptep); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * "Lazy" TLB mode is entered when we are switching to a kernel task, | ||
50 | * which borrows the mm of the previous task. The goal of this | ||
51 | * optimization is to avoid having to install a new page table. On | ||
52 | * early x86 machines (where the concept originated) you couldn't do | ||
53 | * anything short of a full page table install for invalidation, so | ||
54 | * handling a remote TLB invalidate required doing a page table | ||
55 | * re-install. Someone clearly decided that it was silly to keep | ||
56 | * doing this while in "lazy" TLB mode, so the optimization involves | ||
57 | * installing the swapper page table instead, the first time one | ||
58 | * occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running | ||
59 | * the kernel task doesn't need to take any more interrupts. At that | ||
60 | * point it's then necessary to explicitly reinstall it when context | ||
61 | * switching back to the original mm. | ||
62 | * | ||
63 | * On Tile, we have to do a page-table install whenever DMA is enabled, | ||
64 | * so in that case lazy mode doesn't help anyway. And more generally, | ||
65 | * we have efficient per-page TLB shootdown, and don't expect to spend | ||
66 | * that much time in kernel tasks in general, so just leaving the | ||
67 | * kernel task borrowing the old page table, but handling TLB | ||
68 | * shootdowns, is a reasonable thing to do. And importantly, this | ||
69 | * lets us use the hypervisor's internal APIs for TLB shootdown, which | ||
70 | * means we don't have to worry about having TLB shootdowns blocked | ||
71 | * when Linux is disabling interrupts; see the page migration code for | ||
72 | * an example of where it's important for TLB shootdowns to complete | ||
73 | * even when interrupts are disabled at the Linux level. | ||
74 | */ | ||
75 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t) | ||
76 | { | ||
77 | #if CHIP_HAS_TILE_DMA() | ||
78 | /* | ||
79 | * We have to do an "identity" page table switch in order to | ||
80 | * clear any pending DMA interrupts. | ||
81 | */ | ||
82 | if (current->thread.tile_dma_state.enabled) | ||
83 | install_page_table(mm->pgd, __get_cpu_var(current_asid)); | ||
84 | #endif | ||
85 | } | ||
86 | |||
87 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
88 | struct task_struct *tsk) | ||
89 | { | ||
90 | if (likely(prev != next)) { | ||
91 | |||
92 | int cpu = smp_processor_id(); | ||
93 | |||
94 | /* Pick new ASID. */ | ||
95 | int asid = __get_cpu_var(current_asid) + 1; | ||
96 | if (asid > max_asid) { | ||
97 | asid = min_asid; | ||
98 | local_flush_tlb(); | ||
99 | } | ||
100 | __get_cpu_var(current_asid) = asid; | ||
101 | |||
102 | /* Clear cpu from the old mm, and set it in the new one. */ | ||
103 | cpumask_clear_cpu(cpu, &prev->cpu_vm_mask); | ||
104 | cpumask_set_cpu(cpu, &next->cpu_vm_mask); | ||
105 | |||
106 | /* Re-load page tables */ | ||
107 | install_page_table(next->pgd, asid); | ||
108 | |||
109 | /* See how we should set the red/black cache info */ | ||
110 | check_mm_caching(prev, next); | ||
111 | |||
112 | /* | ||
113 | * Since we're changing to a new mm, we have to flush | ||
114 | * the icache in case some physical page now being mapped | ||
115 | * has subsequently been repurposed and has new code. | ||
116 | */ | ||
117 | __flush_icache(); | ||
118 | |||
119 | } | ||
120 | } | ||
121 | |||
122 | static inline void activate_mm(struct mm_struct *prev_mm, | ||
123 | struct mm_struct *next_mm) | ||
124 | { | ||
125 | switch_mm(prev_mm, next_mm, NULL); | ||
126 | } | ||
127 | |||
128 | #define destroy_context(mm) do { } while (0) | ||
129 | #define deactivate_mm(tsk, mm) do { } while (0) | ||
130 | |||
131 | #endif /* _ASM_TILE_MMU_CONTEXT_H */ | ||
diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h new file mode 100644 index 000000000000..c6344c4f32ac --- /dev/null +++ b/arch/tile/include/asm/mmzone.h | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMZONE_H | ||
16 | #define _ASM_TILE_MMZONE_H | ||
17 | |||
18 | extern struct pglist_data node_data[]; | ||
19 | #define NODE_DATA(nid) (&node_data[nid]) | ||
20 | |||
21 | extern void get_memcfg_numa(void); | ||
22 | |||
23 | #ifdef CONFIG_DISCONTIGMEM | ||
24 | |||
25 | #include <asm/page.h> | ||
26 | |||
27 | /* | ||
28 | * Generally, memory ranges are always doled out by the hypervisor in | ||
29 | * fixed-size, power-of-two increments. That would make computing the node | ||
30 | * very easy. We could just take a couple high bits of the PA, which | ||
31 | * denote the memory shim, and we'd be done. However, when we're doing | ||
32 | * memory striping, this may not be true; PAs with different high bit | ||
33 | * values might be in the same node. Thus, we keep a lookup table to | ||
34 | * translate the high bits of the PFN to the node number. | ||
35 | */ | ||
36 | extern int highbits_to_node[]; | ||
37 | |||
38 | static inline int pfn_to_nid(unsigned long pfn) | ||
39 | { | ||
40 | return highbits_to_node[__pfn_to_highbits(pfn)]; | ||
41 | } | ||
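As a hedged illustration of the comment above, highbits_to_node[] might be populated like this for a hypothetical two-node machine whose memory is striped so that alternating high-bit values belong to alternating controllers. The table size and the stripe pattern here are invented for the example; the real table is built from the hypervisor's memory configuration.

	/* Hypothetical setup only: two-way stripe over an assumed 16 high-bit values. */
	#define NR_HIGHBITS 16

	static int example_highbits_to_node[NR_HIGHBITS];

	static void init_highbits_to_node_example(void)
	{
		int hb;
		for (hb = 0; hb < NR_HIGHBITS; hb++)
			example_highbits_to_node[hb] = hb % 2;	/* even stripes -> node 0, odd -> node 1 */
	}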
42 | |||
43 | /* | ||
44 | * Following are macros that each NUMA implementation must define. | ||
45 | */ | ||
46 | |||
47 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
48 | #define node_end_pfn(nid) \ | ||
49 | ({ \ | ||
50 | pg_data_t *__pgdat = NODE_DATA(nid); \ | ||
51 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ | ||
52 | }) | ||
53 | |||
54 | #define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr) | ||
55 | |||
56 | static inline int pfn_valid(int pfn) | ||
57 | { | ||
58 | int nid = pfn_to_nid(pfn); | ||
59 | |||
60 | if (nid >= 0) | ||
61 | return (pfn < node_end_pfn(nid)); | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | /* Information on the NUMA nodes that we compute early */ | ||
66 | extern unsigned long node_start_pfn[]; | ||
67 | extern unsigned long node_end_pfn[]; | ||
68 | extern unsigned long node_memmap_pfn[]; | ||
69 | extern unsigned long node_percpu_pfn[]; | ||
70 | extern unsigned long node_free_pfn[]; | ||
71 | #ifdef CONFIG_HIGHMEM | ||
72 | extern unsigned long node_lowmem_end_pfn[]; | ||
73 | #endif | ||
74 | #ifdef CONFIG_PCI | ||
75 | extern unsigned long pci_reserve_start_pfn; | ||
76 | extern unsigned long pci_reserve_end_pfn; | ||
77 | #endif | ||
78 | |||
79 | #endif /* CONFIG_DISCONTIGMEM */ | ||
80 | |||
81 | #endif /* _ASM_TILE_MMZONE_H */ | ||
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h new file mode 100644 index 000000000000..1e4b79fe8584 --- /dev/null +++ b/arch/tile/include/asm/module.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/module.h> | |||
diff --git a/arch/tile/include/asm/msgbuf.h b/arch/tile/include/asm/msgbuf.h new file mode 100644 index 000000000000..809134c644a6 --- /dev/null +++ b/arch/tile/include/asm/msgbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/msgbuf.h> | |||
diff --git a/arch/tile/include/asm/mutex.h b/arch/tile/include/asm/mutex.h new file mode 100644 index 000000000000..ff6101aa2c71 --- /dev/null +++ b/arch/tile/include/asm/mutex.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/mutex-dec.h> | |||
diff --git a/arch/tile/include/asm/opcode-tile.h b/arch/tile/include/asm/opcode-tile.h new file mode 100644 index 000000000000..ba38959137d7 --- /dev/null +++ b/arch/tile/include/asm/opcode-tile.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_OPCODE_TILE_H | ||
16 | #define _ASM_TILE_OPCODE_TILE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | #if CHIP_WORD_SIZE() == 64 | ||
21 | #include <asm/opcode-tile_64.h> | ||
22 | #else | ||
23 | #include <asm/opcode-tile_32.h> | ||
24 | #endif | ||
25 | |||
26 | /* These definitions are not correct for TILE64, so just avoid them. */ | ||
27 | #undef TILE_ELF_MACHINE_CODE | ||
28 | #undef TILE_ELF_NAME | ||
29 | |||
30 | #endif /* _ASM_TILE_OPCODE_TILE_H */ | ||
diff --git a/arch/tile/include/asm/opcode-tile_32.h b/arch/tile/include/asm/opcode-tile_32.h new file mode 100644 index 000000000000..90f8dd372531 --- /dev/null +++ b/arch/tile/include/asm/opcode-tile_32.h | |||
@@ -0,0 +1,1597 @@ | |||
1 | /* tile.h -- Header file for TILE opcode table | ||
2 | Copyright (C) 2005 Free Software Foundation, Inc. | ||
3 | Contributed by Tilera Corp. */ | ||
4 | |||
5 | #ifndef opcode_tile_h | ||
6 | #define opcode_tile_h | ||
7 | |||
8 | typedef unsigned long long tile_bundle_bits; | ||
9 | |||
10 | |||
11 | enum | ||
12 | { | ||
13 | TILE_MAX_OPERANDS = 5 /* mm */ | ||
14 | }; | ||
15 | |||
16 | typedef enum | ||
17 | { | ||
18 | TILE_OPC_BPT, | ||
19 | TILE_OPC_INFO, | ||
20 | TILE_OPC_INFOL, | ||
21 | TILE_OPC_J, | ||
22 | TILE_OPC_JAL, | ||
23 | TILE_OPC_MOVE, | ||
24 | TILE_OPC_MOVE_SN, | ||
25 | TILE_OPC_MOVEI, | ||
26 | TILE_OPC_MOVEI_SN, | ||
27 | TILE_OPC_MOVELI, | ||
28 | TILE_OPC_MOVELI_SN, | ||
29 | TILE_OPC_MOVELIS, | ||
30 | TILE_OPC_PREFETCH, | ||
31 | TILE_OPC_ADD, | ||
32 | TILE_OPC_ADD_SN, | ||
33 | TILE_OPC_ADDB, | ||
34 | TILE_OPC_ADDB_SN, | ||
35 | TILE_OPC_ADDBS_U, | ||
36 | TILE_OPC_ADDBS_U_SN, | ||
37 | TILE_OPC_ADDH, | ||
38 | TILE_OPC_ADDH_SN, | ||
39 | TILE_OPC_ADDHS, | ||
40 | TILE_OPC_ADDHS_SN, | ||
41 | TILE_OPC_ADDI, | ||
42 | TILE_OPC_ADDI_SN, | ||
43 | TILE_OPC_ADDIB, | ||
44 | TILE_OPC_ADDIB_SN, | ||
45 | TILE_OPC_ADDIH, | ||
46 | TILE_OPC_ADDIH_SN, | ||
47 | TILE_OPC_ADDLI, | ||
48 | TILE_OPC_ADDLI_SN, | ||
49 | TILE_OPC_ADDLIS, | ||
50 | TILE_OPC_ADDS, | ||
51 | TILE_OPC_ADDS_SN, | ||
52 | TILE_OPC_ADIFFB_U, | ||
53 | TILE_OPC_ADIFFB_U_SN, | ||
54 | TILE_OPC_ADIFFH, | ||
55 | TILE_OPC_ADIFFH_SN, | ||
56 | TILE_OPC_AND, | ||
57 | TILE_OPC_AND_SN, | ||
58 | TILE_OPC_ANDI, | ||
59 | TILE_OPC_ANDI_SN, | ||
60 | TILE_OPC_AULI, | ||
61 | TILE_OPC_AVGB_U, | ||
62 | TILE_OPC_AVGB_U_SN, | ||
63 | TILE_OPC_AVGH, | ||
64 | TILE_OPC_AVGH_SN, | ||
65 | TILE_OPC_BBNS, | ||
66 | TILE_OPC_BBNS_SN, | ||
67 | TILE_OPC_BBNST, | ||
68 | TILE_OPC_BBNST_SN, | ||
69 | TILE_OPC_BBS, | ||
70 | TILE_OPC_BBS_SN, | ||
71 | TILE_OPC_BBST, | ||
72 | TILE_OPC_BBST_SN, | ||
73 | TILE_OPC_BGEZ, | ||
74 | TILE_OPC_BGEZ_SN, | ||
75 | TILE_OPC_BGEZT, | ||
76 | TILE_OPC_BGEZT_SN, | ||
77 | TILE_OPC_BGZ, | ||
78 | TILE_OPC_BGZ_SN, | ||
79 | TILE_OPC_BGZT, | ||
80 | TILE_OPC_BGZT_SN, | ||
81 | TILE_OPC_BITX, | ||
82 | TILE_OPC_BITX_SN, | ||
83 | TILE_OPC_BLEZ, | ||
84 | TILE_OPC_BLEZ_SN, | ||
85 | TILE_OPC_BLEZT, | ||
86 | TILE_OPC_BLEZT_SN, | ||
87 | TILE_OPC_BLZ, | ||
88 | TILE_OPC_BLZ_SN, | ||
89 | TILE_OPC_BLZT, | ||
90 | TILE_OPC_BLZT_SN, | ||
91 | TILE_OPC_BNZ, | ||
92 | TILE_OPC_BNZ_SN, | ||
93 | TILE_OPC_BNZT, | ||
94 | TILE_OPC_BNZT_SN, | ||
95 | TILE_OPC_BYTEX, | ||
96 | TILE_OPC_BYTEX_SN, | ||
97 | TILE_OPC_BZ, | ||
98 | TILE_OPC_BZ_SN, | ||
99 | TILE_OPC_BZT, | ||
100 | TILE_OPC_BZT_SN, | ||
101 | TILE_OPC_CLZ, | ||
102 | TILE_OPC_CLZ_SN, | ||
103 | TILE_OPC_CRC32_32, | ||
104 | TILE_OPC_CRC32_32_SN, | ||
105 | TILE_OPC_CRC32_8, | ||
106 | TILE_OPC_CRC32_8_SN, | ||
107 | TILE_OPC_CTZ, | ||
108 | TILE_OPC_CTZ_SN, | ||
109 | TILE_OPC_DRAIN, | ||
110 | TILE_OPC_DTLBPR, | ||
111 | TILE_OPC_DWORD_ALIGN, | ||
112 | TILE_OPC_DWORD_ALIGN_SN, | ||
113 | TILE_OPC_FINV, | ||
114 | TILE_OPC_FLUSH, | ||
115 | TILE_OPC_FNOP, | ||
116 | TILE_OPC_ICOH, | ||
117 | TILE_OPC_ILL, | ||
118 | TILE_OPC_INTHB, | ||
119 | TILE_OPC_INTHB_SN, | ||
120 | TILE_OPC_INTHH, | ||
121 | TILE_OPC_INTHH_SN, | ||
122 | TILE_OPC_INTLB, | ||
123 | TILE_OPC_INTLB_SN, | ||
124 | TILE_OPC_INTLH, | ||
125 | TILE_OPC_INTLH_SN, | ||
126 | TILE_OPC_INV, | ||
127 | TILE_OPC_IRET, | ||
128 | TILE_OPC_JALB, | ||
129 | TILE_OPC_JALF, | ||
130 | TILE_OPC_JALR, | ||
131 | TILE_OPC_JALRP, | ||
132 | TILE_OPC_JB, | ||
133 | TILE_OPC_JF, | ||
134 | TILE_OPC_JR, | ||
135 | TILE_OPC_JRP, | ||
136 | TILE_OPC_LB, | ||
137 | TILE_OPC_LB_SN, | ||
138 | TILE_OPC_LB_U, | ||
139 | TILE_OPC_LB_U_SN, | ||
140 | TILE_OPC_LBADD, | ||
141 | TILE_OPC_LBADD_SN, | ||
142 | TILE_OPC_LBADD_U, | ||
143 | TILE_OPC_LBADD_U_SN, | ||
144 | TILE_OPC_LH, | ||
145 | TILE_OPC_LH_SN, | ||
146 | TILE_OPC_LH_U, | ||
147 | TILE_OPC_LH_U_SN, | ||
148 | TILE_OPC_LHADD, | ||
149 | TILE_OPC_LHADD_SN, | ||
150 | TILE_OPC_LHADD_U, | ||
151 | TILE_OPC_LHADD_U_SN, | ||
152 | TILE_OPC_LNK, | ||
153 | TILE_OPC_LNK_SN, | ||
154 | TILE_OPC_LW, | ||
155 | TILE_OPC_LW_SN, | ||
156 | TILE_OPC_LW_NA, | ||
157 | TILE_OPC_LW_NA_SN, | ||
158 | TILE_OPC_LWADD, | ||
159 | TILE_OPC_LWADD_SN, | ||
160 | TILE_OPC_LWADD_NA, | ||
161 | TILE_OPC_LWADD_NA_SN, | ||
162 | TILE_OPC_MAXB_U, | ||
163 | TILE_OPC_MAXB_U_SN, | ||
164 | TILE_OPC_MAXH, | ||
165 | TILE_OPC_MAXH_SN, | ||
166 | TILE_OPC_MAXIB_U, | ||
167 | TILE_OPC_MAXIB_U_SN, | ||
168 | TILE_OPC_MAXIH, | ||
169 | TILE_OPC_MAXIH_SN, | ||
170 | TILE_OPC_MF, | ||
171 | TILE_OPC_MFSPR, | ||
172 | TILE_OPC_MINB_U, | ||
173 | TILE_OPC_MINB_U_SN, | ||
174 | TILE_OPC_MINH, | ||
175 | TILE_OPC_MINH_SN, | ||
176 | TILE_OPC_MINIB_U, | ||
177 | TILE_OPC_MINIB_U_SN, | ||
178 | TILE_OPC_MINIH, | ||
179 | TILE_OPC_MINIH_SN, | ||
180 | TILE_OPC_MM, | ||
181 | TILE_OPC_MNZ, | ||
182 | TILE_OPC_MNZ_SN, | ||
183 | TILE_OPC_MNZB, | ||
184 | TILE_OPC_MNZB_SN, | ||
185 | TILE_OPC_MNZH, | ||
186 | TILE_OPC_MNZH_SN, | ||
187 | TILE_OPC_MTSPR, | ||
188 | TILE_OPC_MULHH_SS, | ||
189 | TILE_OPC_MULHH_SS_SN, | ||
190 | TILE_OPC_MULHH_SU, | ||
191 | TILE_OPC_MULHH_SU_SN, | ||
192 | TILE_OPC_MULHH_UU, | ||
193 | TILE_OPC_MULHH_UU_SN, | ||
194 | TILE_OPC_MULHHA_SS, | ||
195 | TILE_OPC_MULHHA_SS_SN, | ||
196 | TILE_OPC_MULHHA_SU, | ||
197 | TILE_OPC_MULHHA_SU_SN, | ||
198 | TILE_OPC_MULHHA_UU, | ||
199 | TILE_OPC_MULHHA_UU_SN, | ||
200 | TILE_OPC_MULHHSA_UU, | ||
201 | TILE_OPC_MULHHSA_UU_SN, | ||
202 | TILE_OPC_MULHL_SS, | ||
203 | TILE_OPC_MULHL_SS_SN, | ||
204 | TILE_OPC_MULHL_SU, | ||
205 | TILE_OPC_MULHL_SU_SN, | ||
206 | TILE_OPC_MULHL_US, | ||
207 | TILE_OPC_MULHL_US_SN, | ||
208 | TILE_OPC_MULHL_UU, | ||
209 | TILE_OPC_MULHL_UU_SN, | ||
210 | TILE_OPC_MULHLA_SS, | ||
211 | TILE_OPC_MULHLA_SS_SN, | ||
212 | TILE_OPC_MULHLA_SU, | ||
213 | TILE_OPC_MULHLA_SU_SN, | ||
214 | TILE_OPC_MULHLA_US, | ||
215 | TILE_OPC_MULHLA_US_SN, | ||
216 | TILE_OPC_MULHLA_UU, | ||
217 | TILE_OPC_MULHLA_UU_SN, | ||
218 | TILE_OPC_MULHLSA_UU, | ||
219 | TILE_OPC_MULHLSA_UU_SN, | ||
220 | TILE_OPC_MULLL_SS, | ||
221 | TILE_OPC_MULLL_SS_SN, | ||
222 | TILE_OPC_MULLL_SU, | ||
223 | TILE_OPC_MULLL_SU_SN, | ||
224 | TILE_OPC_MULLL_UU, | ||
225 | TILE_OPC_MULLL_UU_SN, | ||
226 | TILE_OPC_MULLLA_SS, | ||
227 | TILE_OPC_MULLLA_SS_SN, | ||
228 | TILE_OPC_MULLLA_SU, | ||
229 | TILE_OPC_MULLLA_SU_SN, | ||
230 | TILE_OPC_MULLLA_UU, | ||
231 | TILE_OPC_MULLLA_UU_SN, | ||
232 | TILE_OPC_MULLLSA_UU, | ||
233 | TILE_OPC_MULLLSA_UU_SN, | ||
234 | TILE_OPC_MVNZ, | ||
235 | TILE_OPC_MVNZ_SN, | ||
236 | TILE_OPC_MVZ, | ||
237 | TILE_OPC_MVZ_SN, | ||
238 | TILE_OPC_MZ, | ||
239 | TILE_OPC_MZ_SN, | ||
240 | TILE_OPC_MZB, | ||
241 | TILE_OPC_MZB_SN, | ||
242 | TILE_OPC_MZH, | ||
243 | TILE_OPC_MZH_SN, | ||
244 | TILE_OPC_NAP, | ||
245 | TILE_OPC_NOP, | ||
246 | TILE_OPC_NOR, | ||
247 | TILE_OPC_NOR_SN, | ||
248 | TILE_OPC_OR, | ||
249 | TILE_OPC_OR_SN, | ||
250 | TILE_OPC_ORI, | ||
251 | TILE_OPC_ORI_SN, | ||
252 | TILE_OPC_PACKBS_U, | ||
253 | TILE_OPC_PACKBS_U_SN, | ||
254 | TILE_OPC_PACKHB, | ||
255 | TILE_OPC_PACKHB_SN, | ||
256 | TILE_OPC_PACKHS, | ||
257 | TILE_OPC_PACKHS_SN, | ||
258 | TILE_OPC_PACKLB, | ||
259 | TILE_OPC_PACKLB_SN, | ||
260 | TILE_OPC_PCNT, | ||
261 | TILE_OPC_PCNT_SN, | ||
262 | TILE_OPC_RL, | ||
263 | TILE_OPC_RL_SN, | ||
264 | TILE_OPC_RLI, | ||
265 | TILE_OPC_RLI_SN, | ||
266 | TILE_OPC_S1A, | ||
267 | TILE_OPC_S1A_SN, | ||
268 | TILE_OPC_S2A, | ||
269 | TILE_OPC_S2A_SN, | ||
270 | TILE_OPC_S3A, | ||
271 | TILE_OPC_S3A_SN, | ||
272 | TILE_OPC_SADAB_U, | ||
273 | TILE_OPC_SADAB_U_SN, | ||
274 | TILE_OPC_SADAH, | ||
275 | TILE_OPC_SADAH_SN, | ||
276 | TILE_OPC_SADAH_U, | ||
277 | TILE_OPC_SADAH_U_SN, | ||
278 | TILE_OPC_SADB_U, | ||
279 | TILE_OPC_SADB_U_SN, | ||
280 | TILE_OPC_SADH, | ||
281 | TILE_OPC_SADH_SN, | ||
282 | TILE_OPC_SADH_U, | ||
283 | TILE_OPC_SADH_U_SN, | ||
284 | TILE_OPC_SB, | ||
285 | TILE_OPC_SBADD, | ||
286 | TILE_OPC_SEQ, | ||
287 | TILE_OPC_SEQ_SN, | ||
288 | TILE_OPC_SEQB, | ||
289 | TILE_OPC_SEQB_SN, | ||
290 | TILE_OPC_SEQH, | ||
291 | TILE_OPC_SEQH_SN, | ||
292 | TILE_OPC_SEQI, | ||
293 | TILE_OPC_SEQI_SN, | ||
294 | TILE_OPC_SEQIB, | ||
295 | TILE_OPC_SEQIB_SN, | ||
296 | TILE_OPC_SEQIH, | ||
297 | TILE_OPC_SEQIH_SN, | ||
298 | TILE_OPC_SH, | ||
299 | TILE_OPC_SHADD, | ||
300 | TILE_OPC_SHL, | ||
301 | TILE_OPC_SHL_SN, | ||
302 | TILE_OPC_SHLB, | ||
303 | TILE_OPC_SHLB_SN, | ||
304 | TILE_OPC_SHLH, | ||
305 | TILE_OPC_SHLH_SN, | ||
306 | TILE_OPC_SHLI, | ||
307 | TILE_OPC_SHLI_SN, | ||
308 | TILE_OPC_SHLIB, | ||
309 | TILE_OPC_SHLIB_SN, | ||
310 | TILE_OPC_SHLIH, | ||
311 | TILE_OPC_SHLIH_SN, | ||
312 | TILE_OPC_SHR, | ||
313 | TILE_OPC_SHR_SN, | ||
314 | TILE_OPC_SHRB, | ||
315 | TILE_OPC_SHRB_SN, | ||
316 | TILE_OPC_SHRH, | ||
317 | TILE_OPC_SHRH_SN, | ||
318 | TILE_OPC_SHRI, | ||
319 | TILE_OPC_SHRI_SN, | ||
320 | TILE_OPC_SHRIB, | ||
321 | TILE_OPC_SHRIB_SN, | ||
322 | TILE_OPC_SHRIH, | ||
323 | TILE_OPC_SHRIH_SN, | ||
324 | TILE_OPC_SLT, | ||
325 | TILE_OPC_SLT_SN, | ||
326 | TILE_OPC_SLT_U, | ||
327 | TILE_OPC_SLT_U_SN, | ||
328 | TILE_OPC_SLTB, | ||
329 | TILE_OPC_SLTB_SN, | ||
330 | TILE_OPC_SLTB_U, | ||
331 | TILE_OPC_SLTB_U_SN, | ||
332 | TILE_OPC_SLTE, | ||
333 | TILE_OPC_SLTE_SN, | ||
334 | TILE_OPC_SLTE_U, | ||
335 | TILE_OPC_SLTE_U_SN, | ||
336 | TILE_OPC_SLTEB, | ||
337 | TILE_OPC_SLTEB_SN, | ||
338 | TILE_OPC_SLTEB_U, | ||
339 | TILE_OPC_SLTEB_U_SN, | ||
340 | TILE_OPC_SLTEH, | ||
341 | TILE_OPC_SLTEH_SN, | ||
342 | TILE_OPC_SLTEH_U, | ||
343 | TILE_OPC_SLTEH_U_SN, | ||
344 | TILE_OPC_SLTH, | ||
345 | TILE_OPC_SLTH_SN, | ||
346 | TILE_OPC_SLTH_U, | ||
347 | TILE_OPC_SLTH_U_SN, | ||
348 | TILE_OPC_SLTI, | ||
349 | TILE_OPC_SLTI_SN, | ||
350 | TILE_OPC_SLTI_U, | ||
351 | TILE_OPC_SLTI_U_SN, | ||
352 | TILE_OPC_SLTIB, | ||
353 | TILE_OPC_SLTIB_SN, | ||
354 | TILE_OPC_SLTIB_U, | ||
355 | TILE_OPC_SLTIB_U_SN, | ||
356 | TILE_OPC_SLTIH, | ||
357 | TILE_OPC_SLTIH_SN, | ||
358 | TILE_OPC_SLTIH_U, | ||
359 | TILE_OPC_SLTIH_U_SN, | ||
360 | TILE_OPC_SNE, | ||
361 | TILE_OPC_SNE_SN, | ||
362 | TILE_OPC_SNEB, | ||
363 | TILE_OPC_SNEB_SN, | ||
364 | TILE_OPC_SNEH, | ||
365 | TILE_OPC_SNEH_SN, | ||
366 | TILE_OPC_SRA, | ||
367 | TILE_OPC_SRA_SN, | ||
368 | TILE_OPC_SRAB, | ||
369 | TILE_OPC_SRAB_SN, | ||
370 | TILE_OPC_SRAH, | ||
371 | TILE_OPC_SRAH_SN, | ||
372 | TILE_OPC_SRAI, | ||
373 | TILE_OPC_SRAI_SN, | ||
374 | TILE_OPC_SRAIB, | ||
375 | TILE_OPC_SRAIB_SN, | ||
376 | TILE_OPC_SRAIH, | ||
377 | TILE_OPC_SRAIH_SN, | ||
378 | TILE_OPC_SUB, | ||
379 | TILE_OPC_SUB_SN, | ||
380 | TILE_OPC_SUBB, | ||
381 | TILE_OPC_SUBB_SN, | ||
382 | TILE_OPC_SUBBS_U, | ||
383 | TILE_OPC_SUBBS_U_SN, | ||
384 | TILE_OPC_SUBH, | ||
385 | TILE_OPC_SUBH_SN, | ||
386 | TILE_OPC_SUBHS, | ||
387 | TILE_OPC_SUBHS_SN, | ||
388 | TILE_OPC_SUBS, | ||
389 | TILE_OPC_SUBS_SN, | ||
390 | TILE_OPC_SW, | ||
391 | TILE_OPC_SWADD, | ||
392 | TILE_OPC_SWINT0, | ||
393 | TILE_OPC_SWINT1, | ||
394 | TILE_OPC_SWINT2, | ||
395 | TILE_OPC_SWINT3, | ||
396 | TILE_OPC_TBLIDXB0, | ||
397 | TILE_OPC_TBLIDXB0_SN, | ||
398 | TILE_OPC_TBLIDXB1, | ||
399 | TILE_OPC_TBLIDXB1_SN, | ||
400 | TILE_OPC_TBLIDXB2, | ||
401 | TILE_OPC_TBLIDXB2_SN, | ||
402 | TILE_OPC_TBLIDXB3, | ||
403 | TILE_OPC_TBLIDXB3_SN, | ||
404 | TILE_OPC_TNS, | ||
405 | TILE_OPC_TNS_SN, | ||
406 | TILE_OPC_WH64, | ||
407 | TILE_OPC_XOR, | ||
408 | TILE_OPC_XOR_SN, | ||
409 | TILE_OPC_XORI, | ||
410 | TILE_OPC_XORI_SN, | ||
411 | TILE_OPC_NONE | ||
412 | } tile_mnemonic; | ||
413 | |||
414 | /* 64-bit pattern for a { bpt ; nop } bundle. */ | ||
415 | #define TILE_BPT_BUNDLE 0x400b3cae70166000ULL | ||
416 | |||
417 | |||
418 | #define TILE_ELF_MACHINE_CODE EM_TILEPRO | ||
419 | |||
420 | #define TILE_ELF_NAME "elf32-tilepro" | ||
421 | |||
422 | enum | ||
423 | { | ||
424 | TILE_SN_MAX_OPERANDS = 6 /* route */ | ||
425 | }; | ||
426 | |||
427 | typedef enum | ||
428 | { | ||
429 | TILE_SN_OPC_BZ, | ||
430 | TILE_SN_OPC_BNZ, | ||
431 | TILE_SN_OPC_JRR, | ||
432 | TILE_SN_OPC_FNOP, | ||
433 | TILE_SN_OPC_BLZ, | ||
434 | TILE_SN_OPC_NOP, | ||
435 | TILE_SN_OPC_MOVEI, | ||
436 | TILE_SN_OPC_MOVE, | ||
437 | TILE_SN_OPC_BGEZ, | ||
438 | TILE_SN_OPC_JR, | ||
439 | TILE_SN_OPC_BLEZ, | ||
440 | TILE_SN_OPC_BBNS, | ||
441 | TILE_SN_OPC_JALRR, | ||
442 | TILE_SN_OPC_BPT, | ||
443 | TILE_SN_OPC_JALR, | ||
444 | TILE_SN_OPC_SHR1, | ||
445 | TILE_SN_OPC_BGZ, | ||
446 | TILE_SN_OPC_BBS, | ||
447 | TILE_SN_OPC_SHL8II, | ||
448 | TILE_SN_OPC_ADDI, | ||
449 | TILE_SN_OPC_HALT, | ||
450 | TILE_SN_OPC_ROUTE, | ||
451 | TILE_SN_OPC_NONE | ||
452 | } tile_sn_mnemonic; | ||
453 | |||
454 | extern const unsigned char tile_sn_route_encode[6 * 6 * 6]; | ||
455 | extern const signed char tile_sn_route_decode[256][3]; | ||
456 | extern const char tile_sn_direction_names[6][5]; | ||
457 | extern const signed char tile_sn_dest_map[6][6]; | ||
458 | |||
459 | |||
460 | static __inline unsigned int | ||
461 | get_BrOff_SN(tile_bundle_bits num) | ||
462 | { | ||
463 | const unsigned int n = (unsigned int)num; | ||
464 | return (((n >> 0)) & 0x3ff); | ||
465 | } | ||
466 | |||
467 | static __inline unsigned int | ||
468 | get_BrOff_X1(tile_bundle_bits n) | ||
469 | { | ||
470 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
471 | (((unsigned int)(n >> 20)) & 0x00018000); | ||
472 | } | ||
473 | |||
474 | static __inline unsigned int | ||
475 | get_BrType_X1(tile_bundle_bits n) | ||
476 | { | ||
477 | return (((unsigned int)(n >> 31)) & 0xf); | ||
478 | } | ||
479 | |||
480 | static __inline unsigned int | ||
481 | get_Dest_Imm8_X1(tile_bundle_bits n) | ||
482 | { | ||
483 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
484 | (((unsigned int)(n >> 43)) & 0x000000c0); | ||
485 | } | ||
486 | |||
487 | static __inline unsigned int | ||
488 | get_Dest_SN(tile_bundle_bits num) | ||
489 | { | ||
490 | const unsigned int n = (unsigned int)num; | ||
491 | return (((n >> 2)) & 0x3); | ||
492 | } | ||
493 | |||
494 | static __inline unsigned int | ||
495 | get_Dest_X0(tile_bundle_bits num) | ||
496 | { | ||
497 | const unsigned int n = (unsigned int)num; | ||
498 | return (((n >> 0)) & 0x3f); | ||
499 | } | ||
500 | |||
501 | static __inline unsigned int | ||
502 | get_Dest_X1(tile_bundle_bits n) | ||
503 | { | ||
504 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
505 | } | ||
506 | |||
507 | static __inline unsigned int | ||
508 | get_Dest_Y0(tile_bundle_bits num) | ||
509 | { | ||
510 | const unsigned int n = (unsigned int)num; | ||
511 | return (((n >> 0)) & 0x3f); | ||
512 | } | ||
513 | |||
514 | static __inline unsigned int | ||
515 | get_Dest_Y1(tile_bundle_bits n) | ||
516 | { | ||
517 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
518 | } | ||
519 | |||
520 | static __inline unsigned int | ||
521 | get_Imm16_X0(tile_bundle_bits num) | ||
522 | { | ||
523 | const unsigned int n = (unsigned int)num; | ||
524 | return (((n >> 12)) & 0xffff); | ||
525 | } | ||
526 | |||
527 | static __inline unsigned int | ||
528 | get_Imm16_X1(tile_bundle_bits n) | ||
529 | { | ||
530 | return (((unsigned int)(n >> 43)) & 0xffff); | ||
531 | } | ||
532 | |||
533 | static __inline unsigned int | ||
534 | get_Imm8_SN(tile_bundle_bits num) | ||
535 | { | ||
536 | const unsigned int n = (unsigned int)num; | ||
537 | return (((n >> 0)) & 0xff); | ||
538 | } | ||
539 | |||
540 | static __inline unsigned int | ||
541 | get_Imm8_X0(tile_bundle_bits num) | ||
542 | { | ||
543 | const unsigned int n = (unsigned int)num; | ||
544 | return (((n >> 12)) & 0xff); | ||
545 | } | ||
546 | |||
547 | static __inline unsigned int | ||
548 | get_Imm8_X1(tile_bundle_bits n) | ||
549 | { | ||
550 | return (((unsigned int)(n >> 43)) & 0xff); | ||
551 | } | ||
552 | |||
553 | static __inline unsigned int | ||
554 | get_Imm8_Y0(tile_bundle_bits num) | ||
555 | { | ||
556 | const unsigned int n = (unsigned int)num; | ||
557 | return (((n >> 12)) & 0xff); | ||
558 | } | ||
559 | |||
560 | static __inline unsigned int | ||
561 | get_Imm8_Y1(tile_bundle_bits n) | ||
562 | { | ||
563 | return (((unsigned int)(n >> 43)) & 0xff); | ||
564 | } | ||
565 | |||
566 | static __inline unsigned int | ||
567 | get_ImmOpcodeExtension_X0(tile_bundle_bits num) | ||
568 | { | ||
569 | const unsigned int n = (unsigned int)num; | ||
570 | return (((n >> 20)) & 0x7f); | ||
571 | } | ||
572 | |||
573 | static __inline unsigned int | ||
574 | get_ImmOpcodeExtension_X1(tile_bundle_bits n) | ||
575 | { | ||
576 | return (((unsigned int)(n >> 51)) & 0x7f); | ||
577 | } | ||
578 | |||
579 | static __inline unsigned int | ||
580 | get_ImmRROpcodeExtension_SN(tile_bundle_bits num) | ||
581 | { | ||
582 | const unsigned int n = (unsigned int)num; | ||
583 | return (((n >> 8)) & 0x3); | ||
584 | } | ||
585 | |||
586 | static __inline unsigned int | ||
587 | get_JOffLong_X1(tile_bundle_bits n) | ||
588 | { | ||
589 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
590 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
591 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
592 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
593 | (((unsigned int)(n >> 31)) & 0x18000000); | ||
594 | } | ||
595 | |||
596 | static __inline unsigned int | ||
597 | get_JOff_X1(tile_bundle_bits n) | ||
598 | { | ||
599 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
600 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
601 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
602 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
603 | (((unsigned int)(n >> 31)) & 0x08000000); | ||
604 | } | ||
605 | |||
606 | static __inline unsigned int | ||
607 | get_MF_Imm15_X1(tile_bundle_bits n) | ||
608 | { | ||
609 | return (((unsigned int)(n >> 37)) & 0x00003fff) | | ||
610 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
611 | } | ||
612 | |||
613 | static __inline unsigned int | ||
614 | get_MMEnd_X0(tile_bundle_bits num) | ||
615 | { | ||
616 | const unsigned int n = (unsigned int)num; | ||
617 | return (((n >> 18)) & 0x1f); | ||
618 | } | ||
619 | |||
620 | static __inline unsigned int | ||
621 | get_MMEnd_X1(tile_bundle_bits n) | ||
622 | { | ||
623 | return (((unsigned int)(n >> 49)) & 0x1f); | ||
624 | } | ||
625 | |||
626 | static __inline unsigned int | ||
627 | get_MMStart_X0(tile_bundle_bits num) | ||
628 | { | ||
629 | const unsigned int n = (unsigned int)num; | ||
630 | return (((n >> 23)) & 0x1f); | ||
631 | } | ||
632 | |||
633 | static __inline unsigned int | ||
634 | get_MMStart_X1(tile_bundle_bits n) | ||
635 | { | ||
636 | return (((unsigned int)(n >> 54)) & 0x1f); | ||
637 | } | ||
638 | |||
639 | static __inline unsigned int | ||
640 | get_MT_Imm15_X1(tile_bundle_bits n) | ||
641 | { | ||
642 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
643 | (((unsigned int)(n >> 37)) & 0x00003fc0) | | ||
644 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
645 | } | ||
646 | |||
647 | static __inline unsigned int | ||
648 | get_Mode(tile_bundle_bits n) | ||
649 | { | ||
650 | return (((unsigned int)(n >> 63)) & 0x1); | ||
651 | } | ||
652 | |||
653 | static __inline unsigned int | ||
654 | get_NoRegOpcodeExtension_SN(tile_bundle_bits num) | ||
655 | { | ||
656 | const unsigned int n = (unsigned int)num; | ||
657 | return (((n >> 0)) & 0xf); | ||
658 | } | ||
659 | |||
660 | static __inline unsigned int | ||
661 | get_Opcode_SN(tile_bundle_bits num) | ||
662 | { | ||
663 | const unsigned int n = (unsigned int)num; | ||
664 | return (((n >> 10)) & 0x3f); | ||
665 | } | ||
666 | |||
667 | static __inline unsigned int | ||
668 | get_Opcode_X0(tile_bundle_bits num) | ||
669 | { | ||
670 | const unsigned int n = (unsigned int)num; | ||
671 | return (((n >> 28)) & 0x7); | ||
672 | } | ||
673 | |||
674 | static __inline unsigned int | ||
675 | get_Opcode_X1(tile_bundle_bits n) | ||
676 | { | ||
677 | return (((unsigned int)(n >> 59)) & 0xf); | ||
678 | } | ||
679 | |||
680 | static __inline unsigned int | ||
681 | get_Opcode_Y0(tile_bundle_bits num) | ||
682 | { | ||
683 | const unsigned int n = (unsigned int)num; | ||
684 | return (((n >> 27)) & 0xf); | ||
685 | } | ||
686 | |||
687 | static __inline unsigned int | ||
688 | get_Opcode_Y1(tile_bundle_bits n) | ||
689 | { | ||
690 | return (((unsigned int)(n >> 59)) & 0xf); | ||
691 | } | ||
692 | |||
693 | static __inline unsigned int | ||
694 | get_Opcode_Y2(tile_bundle_bits n) | ||
695 | { | ||
696 | return (((unsigned int)(n >> 56)) & 0x7); | ||
697 | } | ||
698 | |||
699 | static __inline unsigned int | ||
700 | get_RROpcodeExtension_SN(tile_bundle_bits num) | ||
701 | { | ||
702 | const unsigned int n = (unsigned int)num; | ||
703 | return (((n >> 4)) & 0xf); | ||
704 | } | ||
705 | |||
706 | static __inline unsigned int | ||
707 | get_RRROpcodeExtension_X0(tile_bundle_bits num) | ||
708 | { | ||
709 | const unsigned int n = (unsigned int)num; | ||
710 | return (((n >> 18)) & 0x1ff); | ||
711 | } | ||
712 | |||
713 | static __inline unsigned int | ||
714 | get_RRROpcodeExtension_X1(tile_bundle_bits n) | ||
715 | { | ||
716 | return (((unsigned int)(n >> 49)) & 0x1ff); | ||
717 | } | ||
718 | |||
719 | static __inline unsigned int | ||
720 | get_RRROpcodeExtension_Y0(tile_bundle_bits num) | ||
721 | { | ||
722 | const unsigned int n = (unsigned int)num; | ||
723 | return (((n >> 18)) & 0x3); | ||
724 | } | ||
725 | |||
726 | static __inline unsigned int | ||
727 | get_RRROpcodeExtension_Y1(tile_bundle_bits n) | ||
728 | { | ||
729 | return (((unsigned int)(n >> 49)) & 0x3); | ||
730 | } | ||
731 | |||
732 | static __inline unsigned int | ||
733 | get_RouteOpcodeExtension_SN(tile_bundle_bits num) | ||
734 | { | ||
735 | const unsigned int n = (unsigned int)num; | ||
736 | return (((n >> 0)) & 0x3ff); | ||
737 | } | ||
738 | |||
739 | static __inline unsigned int | ||
740 | get_S_X0(tile_bundle_bits num) | ||
741 | { | ||
742 | const unsigned int n = (unsigned int)num; | ||
743 | return (((n >> 27)) & 0x1); | ||
744 | } | ||
745 | |||
746 | static __inline unsigned int | ||
747 | get_S_X1(tile_bundle_bits n) | ||
748 | { | ||
749 | return (((unsigned int)(n >> 58)) & 0x1); | ||
750 | } | ||
751 | |||
752 | static __inline unsigned int | ||
753 | get_ShAmt_X0(tile_bundle_bits num) | ||
754 | { | ||
755 | const unsigned int n = (unsigned int)num; | ||
756 | return (((n >> 12)) & 0x1f); | ||
757 | } | ||
758 | |||
759 | static __inline unsigned int | ||
760 | get_ShAmt_X1(tile_bundle_bits n) | ||
761 | { | ||
762 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
763 | } | ||
764 | |||
765 | static __inline unsigned int | ||
766 | get_ShAmt_Y0(tile_bundle_bits num) | ||
767 | { | ||
768 | const unsigned int n = (unsigned int)num; | ||
769 | return (((n >> 12)) & 0x1f); | ||
770 | } | ||
771 | |||
772 | static __inline unsigned int | ||
773 | get_ShAmt_Y1(tile_bundle_bits n) | ||
774 | { | ||
775 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
776 | } | ||
777 | |||
778 | static __inline unsigned int | ||
779 | get_SrcA_X0(tile_bundle_bits num) | ||
780 | { | ||
781 | const unsigned int n = (unsigned int)num; | ||
782 | return (((n >> 6)) & 0x3f); | ||
783 | } | ||
784 | |||
785 | static __inline unsigned int | ||
786 | get_SrcA_X1(tile_bundle_bits n) | ||
787 | { | ||
788 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
789 | } | ||
790 | |||
791 | static __inline unsigned int | ||
792 | get_SrcA_Y0(tile_bundle_bits num) | ||
793 | { | ||
794 | const unsigned int n = (unsigned int)num; | ||
795 | return (((n >> 6)) & 0x3f); | ||
796 | } | ||
797 | |||
798 | static __inline unsigned int | ||
799 | get_SrcA_Y1(tile_bundle_bits n) | ||
800 | { | ||
801 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
802 | } | ||
803 | |||
804 | static __inline unsigned int | ||
805 | get_SrcA_Y2(tile_bundle_bits n) | ||
806 | { | ||
807 | return (((n >> 26)) & 0x00000001) | | ||
808 | (((unsigned int)(n >> 50)) & 0x0000003e); | ||
809 | } | ||
810 | |||
811 | static __inline unsigned int | ||
812 | get_SrcBDest_Y2(tile_bundle_bits num) | ||
813 | { | ||
814 | const unsigned int n = (unsigned int)num; | ||
815 | return (((n >> 20)) & 0x3f); | ||
816 | } | ||
817 | |||
818 | static __inline unsigned int | ||
819 | get_SrcB_X0(tile_bundle_bits num) | ||
820 | { | ||
821 | const unsigned int n = (unsigned int)num; | ||
822 | return (((n >> 12)) & 0x3f); | ||
823 | } | ||
824 | |||
825 | static __inline unsigned int | ||
826 | get_SrcB_X1(tile_bundle_bits n) | ||
827 | { | ||
828 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
829 | } | ||
830 | |||
831 | static __inline unsigned int | ||
832 | get_SrcB_Y0(tile_bundle_bits num) | ||
833 | { | ||
834 | const unsigned int n = (unsigned int)num; | ||
835 | return (((n >> 12)) & 0x3f); | ||
836 | } | ||
837 | |||
838 | static __inline unsigned int | ||
839 | get_SrcB_Y1(tile_bundle_bits n) | ||
840 | { | ||
841 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
842 | } | ||
843 | |||
844 | static __inline unsigned int | ||
845 | get_Src_SN(tile_bundle_bits num) | ||
846 | { | ||
847 | const unsigned int n = (unsigned int)num; | ||
848 | return (((n >> 0)) & 0x3); | ||
849 | } | ||
850 | |||
851 | static __inline unsigned int | ||
852 | get_UnOpcodeExtension_X0(tile_bundle_bits num) | ||
853 | { | ||
854 | const unsigned int n = (unsigned int)num; | ||
855 | return (((n >> 12)) & 0x1f); | ||
856 | } | ||
857 | |||
858 | static __inline unsigned int | ||
859 | get_UnOpcodeExtension_X1(tile_bundle_bits n) | ||
860 | { | ||
861 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
862 | } | ||
863 | |||
864 | static __inline unsigned int | ||
865 | get_UnOpcodeExtension_Y0(tile_bundle_bits num) | ||
866 | { | ||
867 | const unsigned int n = (unsigned int)num; | ||
868 | return (((n >> 12)) & 0x1f); | ||
869 | } | ||
870 | |||
871 | static __inline unsigned int | ||
872 | get_UnOpcodeExtension_Y1(tile_bundle_bits n) | ||
873 | { | ||
874 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
875 | } | ||
876 | |||
877 | static __inline unsigned int | ||
878 | get_UnShOpcodeExtension_X0(tile_bundle_bits num) | ||
879 | { | ||
880 | const unsigned int n = (unsigned int)num; | ||
881 | return (((n >> 17)) & 0x3ff); | ||
882 | } | ||
883 | |||
884 | static __inline unsigned int | ||
885 | get_UnShOpcodeExtension_X1(tile_bundle_bits n) | ||
886 | { | ||
887 | return (((unsigned int)(n >> 48)) & 0x3ff); | ||
888 | } | ||
889 | |||
890 | static __inline unsigned int | ||
891 | get_UnShOpcodeExtension_Y0(tile_bundle_bits num) | ||
892 | { | ||
893 | const unsigned int n = (unsigned int)num; | ||
894 | return (((n >> 17)) & 0x7); | ||
895 | } | ||
896 | |||
897 | static __inline unsigned int | ||
898 | get_UnShOpcodeExtension_Y1(tile_bundle_bits n) | ||
899 | { | ||
900 | return (((unsigned int)(n >> 48)) & 0x7); | ||
901 | } | ||
902 | |||
903 | |||
904 | static __inline int | ||
905 | sign_extend(int n, int num_bits) | ||
906 | { | ||
907 | int shift = (int)(sizeof(int) * 8 - num_bits); | ||
908 | return (n << shift) >> shift; | ||
909 | } | ||
910 | |||
911 | |||
912 | |||
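The field accessors above return unsigned bit patterns, and sign_extend() recovers the signed value. For instance, the X1 branch offset is a 17-bit field (see get_BrOff_X1()), so a disassembler might compute the signed offset as in this sketch, which is not taken from the kernel sources:

	/* Sketch: recover the signed 17-bit branch offset from an X1 bundle. */
	static __inline int
	get_signed_broff_x1(tile_bundle_bits bundle)
	{
		return sign_extend((int)get_BrOff_X1(bundle), 17);
	}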
913 | static __inline tile_bundle_bits | ||
914 | create_BrOff_SN(int num) | ||
915 | { | ||
916 | const unsigned int n = (unsigned int)num; | ||
917 | return ((n & 0x3ff) << 0); | ||
918 | } | ||
919 | |||
920 | static __inline tile_bundle_bits | ||
921 | create_BrOff_X1(int num) | ||
922 | { | ||
923 | const unsigned int n = (unsigned int)num; | ||
924 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
925 | (((tile_bundle_bits)(n & 0x00018000)) << 20); | ||
926 | } | ||
927 | |||
928 | static __inline tile_bundle_bits | ||
929 | create_BrType_X1(int num) | ||
930 | { | ||
931 | const unsigned int n = (unsigned int)num; | ||
932 | return (((tile_bundle_bits)(n & 0xf)) << 31); | ||
933 | } | ||
934 | |||
935 | static __inline tile_bundle_bits | ||
936 | create_Dest_Imm8_X1(int num) | ||
937 | { | ||
938 | const unsigned int n = (unsigned int)num; | ||
939 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
940 | (((tile_bundle_bits)(n & 0x000000c0)) << 43); | ||
941 | } | ||
942 | |||
943 | static __inline tile_bundle_bits | ||
944 | create_Dest_SN(int num) | ||
945 | { | ||
946 | const unsigned int n = (unsigned int)num; | ||
947 | return ((n & 0x3) << 2); | ||
948 | } | ||
949 | |||
950 | static __inline tile_bundle_bits | ||
951 | create_Dest_X0(int num) | ||
952 | { | ||
953 | const unsigned int n = (unsigned int)num; | ||
954 | return ((n & 0x3f) << 0); | ||
955 | } | ||
956 | |||
957 | static __inline tile_bundle_bits | ||
958 | create_Dest_X1(int num) | ||
959 | { | ||
960 | const unsigned int n = (unsigned int)num; | ||
961 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
962 | } | ||
963 | |||
964 | static __inline tile_bundle_bits | ||
965 | create_Dest_Y0(int num) | ||
966 | { | ||
967 | const unsigned int n = (unsigned int)num; | ||
968 | return ((n & 0x3f) << 0); | ||
969 | } | ||
970 | |||
971 | static __inline tile_bundle_bits | ||
972 | create_Dest_Y1(int num) | ||
973 | { | ||
974 | const unsigned int n = (unsigned int)num; | ||
975 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
976 | } | ||
977 | |||
978 | static __inline tile_bundle_bits | ||
979 | create_Imm16_X0(int num) | ||
980 | { | ||
981 | const unsigned int n = (unsigned int)num; | ||
982 | return ((n & 0xffff) << 12); | ||
983 | } | ||
984 | |||
985 | static __inline tile_bundle_bits | ||
986 | create_Imm16_X1(int num) | ||
987 | { | ||
988 | const unsigned int n = (unsigned int)num; | ||
989 | return (((tile_bundle_bits)(n & 0xffff)) << 43); | ||
990 | } | ||
991 | |||
992 | static __inline tile_bundle_bits | ||
993 | create_Imm8_SN(int num) | ||
994 | { | ||
995 | const unsigned int n = (unsigned int)num; | ||
996 | return ((n & 0xff) << 0); | ||
997 | } | ||
998 | |||
999 | static __inline tile_bundle_bits | ||
1000 | create_Imm8_X0(int num) | ||
1001 | { | ||
1002 | const unsigned int n = (unsigned int)num; | ||
1003 | return ((n & 0xff) << 12); | ||
1004 | } | ||
1005 | |||
1006 | static __inline tile_bundle_bits | ||
1007 | create_Imm8_X1(int num) | ||
1008 | { | ||
1009 | const unsigned int n = (unsigned int)num; | ||
1010 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
1011 | } | ||
1012 | |||
1013 | static __inline tile_bundle_bits | ||
1014 | create_Imm8_Y0(int num) | ||
1015 | { | ||
1016 | const unsigned int n = (unsigned int)num; | ||
1017 | return ((n & 0xff) << 12); | ||
1018 | } | ||
1019 | |||
1020 | static __inline tile_bundle_bits | ||
1021 | create_Imm8_Y1(int num) | ||
1022 | { | ||
1023 | const unsigned int n = (unsigned int)num; | ||
1024 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
1025 | } | ||
1026 | |||
1027 | static __inline tile_bundle_bits | ||
1028 | create_ImmOpcodeExtension_X0(int num) | ||
1029 | { | ||
1030 | const unsigned int n = (unsigned int)num; | ||
1031 | return ((n & 0x7f) << 20); | ||
1032 | } | ||
1033 | |||
1034 | static __inline tile_bundle_bits | ||
1035 | create_ImmOpcodeExtension_X1(int num) | ||
1036 | { | ||
1037 | const unsigned int n = (unsigned int)num; | ||
1038 | return (((tile_bundle_bits)(n & 0x7f)) << 51); | ||
1039 | } | ||
1040 | |||
1041 | static __inline tile_bundle_bits | ||
1042 | create_ImmRROpcodeExtension_SN(int num) | ||
1043 | { | ||
1044 | const unsigned int n = (unsigned int)num; | ||
1045 | return ((n & 0x3) << 8); | ||
1046 | } | ||
1047 | |||
1048 | static __inline tile_bundle_bits | ||
1049 | create_JOffLong_X1(int num) | ||
1050 | { | ||
1051 | const unsigned int n = (unsigned int)num; | ||
1052 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1053 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1054 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1055 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1056 | (((tile_bundle_bits)(n & 0x18000000)) << 31); | ||
1057 | } | ||
1058 | |||
1059 | static __inline tile_bundle_bits | ||
1060 | create_JOff_X1(int num) | ||
1061 | { | ||
1062 | const unsigned int n = (unsigned int)num; | ||
1063 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1064 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1065 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1066 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1067 | (((tile_bundle_bits)(n & 0x08000000)) << 31); | ||
1068 | } | ||
1069 | |||
1070 | static __inline tile_bundle_bits | ||
1071 | create_MF_Imm15_X1(int num) | ||
1072 | { | ||
1073 | const unsigned int n = (unsigned int)num; | ||
1074 | return (((tile_bundle_bits)(n & 0x00003fff)) << 37) | | ||
1075 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1076 | } | ||
1077 | |||
1078 | static __inline tile_bundle_bits | ||
1079 | create_MMEnd_X0(int num) | ||
1080 | { | ||
1081 | const unsigned int n = (unsigned int)num; | ||
1082 | return ((n & 0x1f) << 18); | ||
1083 | } | ||
1084 | |||
1085 | static __inline tile_bundle_bits | ||
1086 | create_MMEnd_X1(int num) | ||
1087 | { | ||
1088 | const unsigned int n = (unsigned int)num; | ||
1089 | return (((tile_bundle_bits)(n & 0x1f)) << 49); | ||
1090 | } | ||
1091 | |||
1092 | static __inline tile_bundle_bits | ||
1093 | create_MMStart_X0(int num) | ||
1094 | { | ||
1095 | const unsigned int n = (unsigned int)num; | ||
1096 | return ((n & 0x1f) << 23); | ||
1097 | } | ||
1098 | |||
1099 | static __inline tile_bundle_bits | ||
1100 | create_MMStart_X1(int num) | ||
1101 | { | ||
1102 | const unsigned int n = (unsigned int)num; | ||
1103 | return (((tile_bundle_bits)(n & 0x1f)) << 54); | ||
1104 | } | ||
1105 | |||
1106 | static __inline tile_bundle_bits | ||
1107 | create_MT_Imm15_X1(int num) | ||
1108 | { | ||
1109 | const unsigned int n = (unsigned int)num; | ||
1110 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
1111 | (((tile_bundle_bits)(n & 0x00003fc0)) << 37) | | ||
1112 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1113 | } | ||
1114 | |||
1115 | static __inline tile_bundle_bits | ||
1116 | create_Mode(int num) | ||
1117 | { | ||
1118 | const unsigned int n = (unsigned int)num; | ||
1119 | return (((tile_bundle_bits)(n & 0x1)) << 63); | ||
1120 | } | ||
1121 | |||
1122 | static __inline tile_bundle_bits | ||
1123 | create_NoRegOpcodeExtension_SN(int num) | ||
1124 | { | ||
1125 | const unsigned int n = (unsigned int)num; | ||
1126 | return ((n & 0xf) << 0); | ||
1127 | } | ||
1128 | |||
1129 | static __inline tile_bundle_bits | ||
1130 | create_Opcode_SN(int num) | ||
1131 | { | ||
1132 | const unsigned int n = (unsigned int)num; | ||
1133 | return ((n & 0x3f) << 10); | ||
1134 | } | ||
1135 | |||
1136 | static __inline tile_bundle_bits | ||
1137 | create_Opcode_X0(int num) | ||
1138 | { | ||
1139 | const unsigned int n = (unsigned int)num; | ||
1140 | return ((n & 0x7) << 28); | ||
1141 | } | ||
1142 | |||
1143 | static __inline tile_bundle_bits | ||
1144 | create_Opcode_X1(int num) | ||
1145 | { | ||
1146 | const unsigned int n = (unsigned int)num; | ||
1147 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1148 | } | ||
1149 | |||
1150 | static __inline tile_bundle_bits | ||
1151 | create_Opcode_Y0(int num) | ||
1152 | { | ||
1153 | const unsigned int n = (unsigned int)num; | ||
1154 | return ((n & 0xf) << 27); | ||
1155 | } | ||
1156 | |||
1157 | static __inline tile_bundle_bits | ||
1158 | create_Opcode_Y1(int num) | ||
1159 | { | ||
1160 | const unsigned int n = (unsigned int)num; | ||
1161 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1162 | } | ||
1163 | |||
1164 | static __inline tile_bundle_bits | ||
1165 | create_Opcode_Y2(int num) | ||
1166 | { | ||
1167 | const unsigned int n = (unsigned int)num; | ||
1168 | return (((tile_bundle_bits)(n & 0x7)) << 56); | ||
1169 | } | ||
1170 | |||
1171 | static __inline tile_bundle_bits | ||
1172 | create_RROpcodeExtension_SN(int num) | ||
1173 | { | ||
1174 | const unsigned int n = (unsigned int)num; | ||
1175 | return ((n & 0xf) << 4); | ||
1176 | } | ||
1177 | |||
1178 | static __inline tile_bundle_bits | ||
1179 | create_RRROpcodeExtension_X0(int num) | ||
1180 | { | ||
1181 | const unsigned int n = (unsigned int)num; | ||
1182 | return ((n & 0x1ff) << 18); | ||
1183 | } | ||
1184 | |||
1185 | static __inline tile_bundle_bits | ||
1186 | create_RRROpcodeExtension_X1(int num) | ||
1187 | { | ||
1188 | const unsigned int n = (unsigned int)num; | ||
1189 | return (((tile_bundle_bits)(n & 0x1ff)) << 49); | ||
1190 | } | ||
1191 | |||
1192 | static __inline tile_bundle_bits | ||
1193 | create_RRROpcodeExtension_Y0(int num) | ||
1194 | { | ||
1195 | const unsigned int n = (unsigned int)num; | ||
1196 | return ((n & 0x3) << 18); | ||
1197 | } | ||
1198 | |||
1199 | static __inline tile_bundle_bits | ||
1200 | create_RRROpcodeExtension_Y1(int num) | ||
1201 | { | ||
1202 | const unsigned int n = (unsigned int)num; | ||
1203 | return (((tile_bundle_bits)(n & 0x3)) << 49); | ||
1204 | } | ||
1205 | |||
1206 | static __inline tile_bundle_bits | ||
1207 | create_RouteOpcodeExtension_SN(int num) | ||
1208 | { | ||
1209 | const unsigned int n = (unsigned int)num; | ||
1210 | return ((n & 0x3ff) << 0); | ||
1211 | } | ||
1212 | |||
1213 | static __inline tile_bundle_bits | ||
1214 | create_S_X0(int num) | ||
1215 | { | ||
1216 | const unsigned int n = (unsigned int)num; | ||
1217 | return ((n & 0x1) << 27); | ||
1218 | } | ||
1219 | |||
1220 | static __inline tile_bundle_bits | ||
1221 | create_S_X1(int num) | ||
1222 | { | ||
1223 | const unsigned int n = (unsigned int)num; | ||
1224 | return (((tile_bundle_bits)(n & 0x1)) << 58); | ||
1225 | } | ||
1226 | |||
1227 | static __inline tile_bundle_bits | ||
1228 | create_ShAmt_X0(int num) | ||
1229 | { | ||
1230 | const unsigned int n = (unsigned int)num; | ||
1231 | return ((n & 0x1f) << 12); | ||
1232 | } | ||
1233 | |||
1234 | static __inline tile_bundle_bits | ||
1235 | create_ShAmt_X1(int num) | ||
1236 | { | ||
1237 | const unsigned int n = (unsigned int)num; | ||
1238 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1239 | } | ||
1240 | |||
1241 | static __inline tile_bundle_bits | ||
1242 | create_ShAmt_Y0(int num) | ||
1243 | { | ||
1244 | const unsigned int n = (unsigned int)num; | ||
1245 | return ((n & 0x1f) << 12); | ||
1246 | } | ||
1247 | |||
1248 | static __inline tile_bundle_bits | ||
1249 | create_ShAmt_Y1(int num) | ||
1250 | { | ||
1251 | const unsigned int n = (unsigned int)num; | ||
1252 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1253 | } | ||
1254 | |||
1255 | static __inline tile_bundle_bits | ||
1256 | create_SrcA_X0(int num) | ||
1257 | { | ||
1258 | const unsigned int n = (unsigned int)num; | ||
1259 | return ((n & 0x3f) << 6); | ||
1260 | } | ||
1261 | |||
1262 | static __inline tile_bundle_bits | ||
1263 | create_SrcA_X1(int num) | ||
1264 | { | ||
1265 | const unsigned int n = (unsigned int)num; | ||
1266 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1267 | } | ||
1268 | |||
1269 | static __inline tile_bundle_bits | ||
1270 | create_SrcA_Y0(int num) | ||
1271 | { | ||
1272 | const unsigned int n = (unsigned int)num; | ||
1273 | return ((n & 0x3f) << 6); | ||
1274 | } | ||
1275 | |||
1276 | static __inline tile_bundle_bits | ||
1277 | create_SrcA_Y1(int num) | ||
1278 | { | ||
1279 | const unsigned int n = (unsigned int)num; | ||
1280 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1281 | } | ||
1282 | |||
1283 | static __inline tile_bundle_bits | ||
1284 | create_SrcA_Y2(int num) | ||
1285 | { | ||
1286 | const unsigned int n = (unsigned int)num; | ||
1287 | return ((n & 0x00000001) << 26) | | ||
1288 | (((tile_bundle_bits)(n & 0x0000003e)) << 50); | ||
1289 | } | ||
1290 | |||
1291 | static __inline tile_bundle_bits | ||
1292 | create_SrcBDest_Y2(int num) | ||
1293 | { | ||
1294 | const unsigned int n = (unsigned int)num; | ||
1295 | return ((n & 0x3f) << 20); | ||
1296 | } | ||
1297 | |||
1298 | static __inline tile_bundle_bits | ||
1299 | create_SrcB_X0(int num) | ||
1300 | { | ||
1301 | const unsigned int n = (unsigned int)num; | ||
1302 | return ((n & 0x3f) << 12); | ||
1303 | } | ||
1304 | |||
1305 | static __inline tile_bundle_bits | ||
1306 | create_SrcB_X1(int num) | ||
1307 | { | ||
1308 | const unsigned int n = (unsigned int)num; | ||
1309 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1310 | } | ||
1311 | |||
1312 | static __inline tile_bundle_bits | ||
1313 | create_SrcB_Y0(int num) | ||
1314 | { | ||
1315 | const unsigned int n = (unsigned int)num; | ||
1316 | return ((n & 0x3f) << 12); | ||
1317 | } | ||
1318 | |||
1319 | static __inline tile_bundle_bits | ||
1320 | create_SrcB_Y1(int num) | ||
1321 | { | ||
1322 | const unsigned int n = (unsigned int)num; | ||
1323 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1324 | } | ||
1325 | |||
1326 | static __inline tile_bundle_bits | ||
1327 | create_Src_SN(int num) | ||
1328 | { | ||
1329 | const unsigned int n = (unsigned int)num; | ||
1330 | return ((n & 0x3) << 0); | ||
1331 | } | ||
1332 | |||
1333 | static __inline tile_bundle_bits | ||
1334 | create_UnOpcodeExtension_X0(int num) | ||
1335 | { | ||
1336 | const unsigned int n = (unsigned int)num; | ||
1337 | return ((n & 0x1f) << 12); | ||
1338 | } | ||
1339 | |||
1340 | static __inline tile_bundle_bits | ||
1341 | create_UnOpcodeExtension_X1(int num) | ||
1342 | { | ||
1343 | const unsigned int n = (unsigned int)num; | ||
1344 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1345 | } | ||
1346 | |||
1347 | static __inline tile_bundle_bits | ||
1348 | create_UnOpcodeExtension_Y0(int num) | ||
1349 | { | ||
1350 | const unsigned int n = (unsigned int)num; | ||
1351 | return ((n & 0x1f) << 12); | ||
1352 | } | ||
1353 | |||
1354 | static __inline tile_bundle_bits | ||
1355 | create_UnOpcodeExtension_Y1(int num) | ||
1356 | { | ||
1357 | const unsigned int n = (unsigned int)num; | ||
1358 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1359 | } | ||
1360 | |||
1361 | static __inline tile_bundle_bits | ||
1362 | create_UnShOpcodeExtension_X0(int num) | ||
1363 | { | ||
1364 | const unsigned int n = (unsigned int)num; | ||
1365 | return ((n & 0x3ff) << 17); | ||
1366 | } | ||
1367 | |||
1368 | static __inline tile_bundle_bits | ||
1369 | create_UnShOpcodeExtension_X1(int num) | ||
1370 | { | ||
1371 | const unsigned int n = (unsigned int)num; | ||
1372 | return (((tile_bundle_bits)(n & 0x3ff)) << 48); | ||
1373 | } | ||
1374 | |||
1375 | static __inline tile_bundle_bits | ||
1376 | create_UnShOpcodeExtension_Y0(int num) | ||
1377 | { | ||
1378 | const unsigned int n = (unsigned int)num; | ||
1379 | return ((n & 0x7) << 17); | ||
1380 | } | ||
1381 | |||
1382 | static __inline tile_bundle_bits | ||
1383 | create_UnShOpcodeExtension_Y1(int num) | ||
1384 | { | ||
1385 | const unsigned int n = (unsigned int)num; | ||
1386 | return (((tile_bundle_bits)(n & 0x7)) << 48); | ||
1387 | } | ||
1388 | |||
1389 | |||
1390 | typedef unsigned short tile_sn_instruction_bits; | ||
1391 | |||
1392 | |||
1393 | typedef enum | ||
1394 | { | ||
1395 | TILE_PIPELINE_X0, | ||
1396 | TILE_PIPELINE_X1, | ||
1397 | TILE_PIPELINE_Y0, | ||
1398 | TILE_PIPELINE_Y1, | ||
1399 | TILE_PIPELINE_Y2, | ||
1400 | } tile_pipeline; | ||
1401 | |||
1402 | #define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1) | ||
1403 | |||
1404 | typedef enum | ||
1405 | { | ||
1406 | TILE_OP_TYPE_REGISTER, | ||
1407 | TILE_OP_TYPE_IMMEDIATE, | ||
1408 | TILE_OP_TYPE_ADDRESS, | ||
1409 | TILE_OP_TYPE_SPR | ||
1410 | } tile_operand_type; | ||
1411 | |||
1412 | /* This is the bit that determines if a bundle is in the Y encoding. */ | ||
1413 | #define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63) | ||
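A decoder can test this bit to decide which pipeline slots to extract; get_Mode() above returns the same bit. A short sketch, assuming (as the comment implies) that a set bit selects the Y encoding:

	/* Sketch: choose X vs. Y decoding based on bit 63 of the bundle. */
	static __inline int
	bundle_is_y_encoding(tile_bundle_bits bundle)
	{
		return (bundle & TILE_BUNDLE_Y_ENCODING_MASK) != 0;
	}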
1414 | |||
1415 | enum | ||
1416 | { | ||
1417 | /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */ | ||
1418 | TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3, | ||
1419 | |||
1420 | /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */ | ||
1421 | TILE_NUM_PIPELINE_ENCODINGS = 5, | ||
1422 | |||
1423 | /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */ | ||
1424 | TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3, | ||
1425 | |||
1426 | /* Instructions take this many bytes. */ | ||
1427 | TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES, | ||
1428 | |||
1429 | /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */ | ||
1430 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, | ||
1431 | |||
1432 | /* Bundles should be aligned modulo this number of bytes. */ | ||
1433 | TILE_BUNDLE_ALIGNMENT_IN_BYTES = | ||
1434 | (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), | ||
1435 | |||
1436 | /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */ | ||
1437 | TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1, | ||
1438 | |||
1439 | /* Static network instructions take this many bytes. */ | ||
1440 | TILE_SN_INSTRUCTION_SIZE_IN_BYTES = | ||
1441 | (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES), | ||
1442 | |||
1443 | /* Number of registers (some are magic, such as network I/O). */ | ||
1444 | TILE_NUM_REGISTERS = 64, | ||
1445 | |||
1446 | /* Number of static network registers. */ | ||
1447 | TILE_NUM_SN_REGISTERS = 4 | ||
1448 | }; | ||
1449 | |||
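These constants let generic code walk instruction memory without hard-coding sizes; for example, stepping a PC bundle by bundle and checking alignment (a minimal sketch built only from the definitions above):

	/* Sketch: bundles are 8 bytes and must be 8-byte aligned. */
	static __inline int pc_is_bundle_aligned(unsigned long pc)
	{
		return (pc & (TILE_BUNDLE_ALIGNMENT_IN_BYTES - 1)) == 0;
	}

	static __inline unsigned long next_bundle(unsigned long pc)
	{
		return pc + TILE_BUNDLE_SIZE_IN_BYTES;
	}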
1450 | |||
1451 | struct tile_operand | ||
1452 | { | ||
1453 | /* Is this operand a register, immediate or address? */ | ||
1454 | tile_operand_type type; | ||
1455 | |||
1456 | /* The default relocation type for this operand. */ | ||
1457 | signed int default_reloc : 16; | ||
1458 | |||
1459 | /* How many bits is this value? (used for range checking) */ | ||
1460 | unsigned int num_bits : 5; | ||
1461 | |||
1462 | /* Is the value signed? (used for range checking) */ | ||
1463 | unsigned int is_signed : 1; | ||
1464 | |||
1465 | /* Is this operand a source register? */ | ||
1466 | unsigned int is_src_reg : 1; | ||
1467 | |||
1468 | /* Is this operand written? (i.e. is it a destination register) */ | ||
1469 | unsigned int is_dest_reg : 1; | ||
1470 | |||
1471 | /* Is this operand PC-relative? */ | ||
1472 | unsigned int is_pc_relative : 1; | ||
1473 | |||
1474 | /* By how many bits do we right shift the value before inserting? */ | ||
1475 | unsigned int rightshift : 2; | ||
1476 | |||
1477 | /* Return the bits for this operand to be ORed into an existing bundle. */ | ||
1478 | tile_bundle_bits (*insert) (int op); | ||
1479 | |||
1480 | /* Extract this operand and return it. */ | ||
1481 | unsigned int (*extract) (tile_bundle_bits bundle); | ||
1482 | }; | ||
1483 | |||
1484 | |||
1485 | extern const struct tile_operand tile_operands[]; | ||
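The insert/extract hooks in struct tile_operand give an assembler and a disassembler a uniform way to move an operand value in and out of a bundle. A hedged sketch of how an encoder might use them; in real callers the operand pointer comes from tile_operands[] via the indices in tile_opcode.operands[]:

	/* Sketch: place one operand value into a bundle via its insert() hook.
	 * Per the struct comment, insert() returns bits to be ORed into the bundle,
	 * and extract() reads the (masked) field back out. */
	static tile_bundle_bits
	encode_operand(tile_bundle_bits bundle, const struct tile_operand *op, int value)
	{
		bundle |= op->insert(value);
		/* op->extract(bundle) would now return the encoded field bits. */
		return bundle;
	}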
1486 | |||
1487 | /* One finite-state machine per pipe for rapid instruction decoding. */ | ||
1488 | extern const unsigned short * const | ||
1489 | tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1490 | |||
1491 | |||
1492 | struct tile_opcode | ||
1493 | { | ||
1494 | /* The opcode mnemonic, e.g. "add" */ | ||
1495 | const char *name; | ||
1496 | |||
1497 | /* The enum value for this mnemonic. */ | ||
1498 | tile_mnemonic mnemonic; | ||
1499 | |||
1500 | /* A bit mask of which of the five pipes this instruction | ||
1501 | is compatible with: | ||
1502 | X0 0x01 | ||
1503 | X1 0x02 | ||
1504 | Y0 0x04 | ||
1505 | Y1 0x08 | ||
1506 | Y2 0x10 */ | ||
1507 | unsigned char pipes; | ||
1508 | |||
1509 | /* How many operands are there? */ | ||
1510 | unsigned char num_operands; | ||
1511 | |||
1512 | /* Which register does this write implicitly, or TREG_ZERO if none? */ | ||
1513 | unsigned char implicitly_written_register; | ||
1514 | |||
1515 | /* Can this be bundled with other instructions (almost always true). */ | ||
1516 | unsigned char can_bundle; | ||
1517 | |||
1518 | /* The description of the operands. Each of these is an | ||
1519 | * index into the tile_operands[] table. */ | ||
1520 | unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS]; | ||
1521 | |||
1522 | /* A mask of which bits have predefined values for each pipeline. | ||
1523 | * This is useful for disassembly. */ | ||
1524 | tile_bundle_bits fixed_bit_masks[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1525 | |||
1526 | /* For each bit set in fixed_bit_masks, what the value is for this | ||
1527 | * instruction. */ | ||
1528 | tile_bundle_bits fixed_bit_values[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1529 | }; | ||
1530 | |||
1531 | extern const struct tile_opcode tile_opcodes[]; | ||
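The pipes field is indexed by the tile_pipeline enum (X0 is bit 0x01, X1 0x02, Y0 0x04, Y1 0x08, Y2 0x10), so a bundler can check slot compatibility with a one-bit test. A sketch using only the definitions in this header:

	/* Sketch: does this opcode issue in the given pipeline? */
	static __inline int
	opcode_fits_pipe(const struct tile_opcode *opc, tile_pipeline pipe)
	{
		return (opc->pipes & (1 << (int)pipe)) != 0;
	}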
1532 | |||
1533 | struct tile_sn_opcode | ||
1534 | { | ||
1535 | /* The opcode mnemonic, e.g. "add" */ | ||
1536 | const char *name; | ||
1537 | |||
1538 | /* The enum value for this mnemonic. */ | ||
1539 | tile_sn_mnemonic mnemonic; | ||
1540 | |||
1541 | /* How many operands are there? */ | ||
1542 | unsigned char num_operands; | ||
1543 | |||
1544 | /* The description of the operands. Each of these is an | ||
1545 | * index into the tile_operands[] table. */ | ||
1546 | unsigned char operands[TILE_SN_MAX_OPERANDS]; | ||
1547 | |||
1548 | /* A mask of which bits have predefined values. | ||
1549 | * This is useful for disassembly. */ | ||
1550 | tile_sn_instruction_bits fixed_bit_mask; | ||
1551 | |||
1552 | /* For each bit set in fixed_bit_masks, what its value is. */ | ||
1553 | tile_sn_instruction_bits fixed_bit_values; | ||
1554 | }; | ||
1555 | |||
1556 | extern const struct tile_sn_opcode tile_sn_opcodes[]; | ||
1557 | |||
1558 | /* Used for non-textual disassembly into structs. */ | ||
1559 | struct tile_decoded_instruction | ||
1560 | { | ||
1561 | const struct tile_opcode *opcode; | ||
1562 | const struct tile_operand *operands[TILE_MAX_OPERANDS]; | ||
1563 | int operand_values[TILE_MAX_OPERANDS]; | ||
1564 | }; | ||
1565 | |||
1566 | |||
1567 | /* Disassemble a bundle into a struct for machine processing. */ | ||
1568 | extern int parse_insn_tile(tile_bundle_bits bits, | ||
1569 | unsigned int pc, | ||
1570 | struct tile_decoded_instruction | ||
1571 | decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); | ||
1572 | |||
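A typical caller hands parse_insn_tile() an aligned bundle and then walks the decoded[] slots it fills in. A hedged usage sketch, assuming the return value is the number of instructions decoded (printk here stands in for whatever output routine the caller actually uses):

	/* Sketch: decode one bundle and print each instruction's mnemonic. */
	static void
	dump_bundle(tile_bundle_bits bits, unsigned int pc)
	{
		struct tile_decoded_instruction decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
		int i, num = parse_insn_tile(bits, pc, decoded);

		for (i = 0; i < num; i++)
			printk("  %s\n", decoded[i].opcode->name);
	}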
1573 | |||
1574 | /* Canonical names of all the registers. */ | ||
1575 | /* ISSUE: This table lives in "tile-dis.c" */ | ||
1576 | extern const char * const tile_register_names[]; | ||
1577 | |||
1578 | /* Descriptor for a special-purpose register. */ | ||
1579 | struct tile_spr | ||
1580 | { | ||
1581 | /* The number */ | ||
1582 | int number; | ||
1583 | |||
1584 | /* The name */ | ||
1585 | const char *name; | ||
1586 | }; | ||
1587 | |||
1588 | /* List of all the SPRs; ordered by increasing number. */ | ||
1589 | extern const struct tile_spr tile_sprs[]; | ||
1590 | |||
1591 | /* Number of special-purpose registers. */ | ||
1592 | extern const int tile_num_sprs; | ||
1593 | |||
1594 | extern const char * | ||
1595 | get_tile_spr_name (int num); | ||
1596 | |||
1597 | #endif /* opcode_tile_h */ | ||
diff --git a/arch/tile/include/asm/opcode-tile_64.h b/arch/tile/include/asm/opcode-tile_64.h new file mode 100644 index 000000000000..90f8dd372531 --- /dev/null +++ b/arch/tile/include/asm/opcode-tile_64.h | |||
@@ -0,0 +1,1597 @@ | |||
1 | /* tile.h -- Header file for TILE opcode table | ||
2 | Copyright (C) 2005 Free Software Foundation, Inc. | ||
3 | Contributed by Tilera Corp. */ | ||
4 | |||
5 | #ifndef opcode_tile_h | ||
6 | #define opcode_tile_h | ||
7 | |||
8 | typedef unsigned long long tile_bundle_bits; | ||
9 | |||
10 | |||
11 | enum | ||
12 | { | ||
13 | TILE_MAX_OPERANDS = 5 /* mm */ | ||
14 | }; | ||
15 | |||
16 | typedef enum | ||
17 | { | ||
18 | TILE_OPC_BPT, | ||
19 | TILE_OPC_INFO, | ||
20 | TILE_OPC_INFOL, | ||
21 | TILE_OPC_J, | ||
22 | TILE_OPC_JAL, | ||
23 | TILE_OPC_MOVE, | ||
24 | TILE_OPC_MOVE_SN, | ||
25 | TILE_OPC_MOVEI, | ||
26 | TILE_OPC_MOVEI_SN, | ||
27 | TILE_OPC_MOVELI, | ||
28 | TILE_OPC_MOVELI_SN, | ||
29 | TILE_OPC_MOVELIS, | ||
30 | TILE_OPC_PREFETCH, | ||
31 | TILE_OPC_ADD, | ||
32 | TILE_OPC_ADD_SN, | ||
33 | TILE_OPC_ADDB, | ||
34 | TILE_OPC_ADDB_SN, | ||
35 | TILE_OPC_ADDBS_U, | ||
36 | TILE_OPC_ADDBS_U_SN, | ||
37 | TILE_OPC_ADDH, | ||
38 | TILE_OPC_ADDH_SN, | ||
39 | TILE_OPC_ADDHS, | ||
40 | TILE_OPC_ADDHS_SN, | ||
41 | TILE_OPC_ADDI, | ||
42 | TILE_OPC_ADDI_SN, | ||
43 | TILE_OPC_ADDIB, | ||
44 | TILE_OPC_ADDIB_SN, | ||
45 | TILE_OPC_ADDIH, | ||
46 | TILE_OPC_ADDIH_SN, | ||
47 | TILE_OPC_ADDLI, | ||
48 | TILE_OPC_ADDLI_SN, | ||
49 | TILE_OPC_ADDLIS, | ||
50 | TILE_OPC_ADDS, | ||
51 | TILE_OPC_ADDS_SN, | ||
52 | TILE_OPC_ADIFFB_U, | ||
53 | TILE_OPC_ADIFFB_U_SN, | ||
54 | TILE_OPC_ADIFFH, | ||
55 | TILE_OPC_ADIFFH_SN, | ||
56 | TILE_OPC_AND, | ||
57 | TILE_OPC_AND_SN, | ||
58 | TILE_OPC_ANDI, | ||
59 | TILE_OPC_ANDI_SN, | ||
60 | TILE_OPC_AULI, | ||
61 | TILE_OPC_AVGB_U, | ||
62 | TILE_OPC_AVGB_U_SN, | ||
63 | TILE_OPC_AVGH, | ||
64 | TILE_OPC_AVGH_SN, | ||
65 | TILE_OPC_BBNS, | ||
66 | TILE_OPC_BBNS_SN, | ||
67 | TILE_OPC_BBNST, | ||
68 | TILE_OPC_BBNST_SN, | ||
69 | TILE_OPC_BBS, | ||
70 | TILE_OPC_BBS_SN, | ||
71 | TILE_OPC_BBST, | ||
72 | TILE_OPC_BBST_SN, | ||
73 | TILE_OPC_BGEZ, | ||
74 | TILE_OPC_BGEZ_SN, | ||
75 | TILE_OPC_BGEZT, | ||
76 | TILE_OPC_BGEZT_SN, | ||
77 | TILE_OPC_BGZ, | ||
78 | TILE_OPC_BGZ_SN, | ||
79 | TILE_OPC_BGZT, | ||
80 | TILE_OPC_BGZT_SN, | ||
81 | TILE_OPC_BITX, | ||
82 | TILE_OPC_BITX_SN, | ||
83 | TILE_OPC_BLEZ, | ||
84 | TILE_OPC_BLEZ_SN, | ||
85 | TILE_OPC_BLEZT, | ||
86 | TILE_OPC_BLEZT_SN, | ||
87 | TILE_OPC_BLZ, | ||
88 | TILE_OPC_BLZ_SN, | ||
89 | TILE_OPC_BLZT, | ||
90 | TILE_OPC_BLZT_SN, | ||
91 | TILE_OPC_BNZ, | ||
92 | TILE_OPC_BNZ_SN, | ||
93 | TILE_OPC_BNZT, | ||
94 | TILE_OPC_BNZT_SN, | ||
95 | TILE_OPC_BYTEX, | ||
96 | TILE_OPC_BYTEX_SN, | ||
97 | TILE_OPC_BZ, | ||
98 | TILE_OPC_BZ_SN, | ||
99 | TILE_OPC_BZT, | ||
100 | TILE_OPC_BZT_SN, | ||
101 | TILE_OPC_CLZ, | ||
102 | TILE_OPC_CLZ_SN, | ||
103 | TILE_OPC_CRC32_32, | ||
104 | TILE_OPC_CRC32_32_SN, | ||
105 | TILE_OPC_CRC32_8, | ||
106 | TILE_OPC_CRC32_8_SN, | ||
107 | TILE_OPC_CTZ, | ||
108 | TILE_OPC_CTZ_SN, | ||
109 | TILE_OPC_DRAIN, | ||
110 | TILE_OPC_DTLBPR, | ||
111 | TILE_OPC_DWORD_ALIGN, | ||
112 | TILE_OPC_DWORD_ALIGN_SN, | ||
113 | TILE_OPC_FINV, | ||
114 | TILE_OPC_FLUSH, | ||
115 | TILE_OPC_FNOP, | ||
116 | TILE_OPC_ICOH, | ||
117 | TILE_OPC_ILL, | ||
118 | TILE_OPC_INTHB, | ||
119 | TILE_OPC_INTHB_SN, | ||
120 | TILE_OPC_INTHH, | ||
121 | TILE_OPC_INTHH_SN, | ||
122 | TILE_OPC_INTLB, | ||
123 | TILE_OPC_INTLB_SN, | ||
124 | TILE_OPC_INTLH, | ||
125 | TILE_OPC_INTLH_SN, | ||
126 | TILE_OPC_INV, | ||
127 | TILE_OPC_IRET, | ||
128 | TILE_OPC_JALB, | ||
129 | TILE_OPC_JALF, | ||
130 | TILE_OPC_JALR, | ||
131 | TILE_OPC_JALRP, | ||
132 | TILE_OPC_JB, | ||
133 | TILE_OPC_JF, | ||
134 | TILE_OPC_JR, | ||
135 | TILE_OPC_JRP, | ||
136 | TILE_OPC_LB, | ||
137 | TILE_OPC_LB_SN, | ||
138 | TILE_OPC_LB_U, | ||
139 | TILE_OPC_LB_U_SN, | ||
140 | TILE_OPC_LBADD, | ||
141 | TILE_OPC_LBADD_SN, | ||
142 | TILE_OPC_LBADD_U, | ||
143 | TILE_OPC_LBADD_U_SN, | ||
144 | TILE_OPC_LH, | ||
145 | TILE_OPC_LH_SN, | ||
146 | TILE_OPC_LH_U, | ||
147 | TILE_OPC_LH_U_SN, | ||
148 | TILE_OPC_LHADD, | ||
149 | TILE_OPC_LHADD_SN, | ||
150 | TILE_OPC_LHADD_U, | ||
151 | TILE_OPC_LHADD_U_SN, | ||
152 | TILE_OPC_LNK, | ||
153 | TILE_OPC_LNK_SN, | ||
154 | TILE_OPC_LW, | ||
155 | TILE_OPC_LW_SN, | ||
156 | TILE_OPC_LW_NA, | ||
157 | TILE_OPC_LW_NA_SN, | ||
158 | TILE_OPC_LWADD, | ||
159 | TILE_OPC_LWADD_SN, | ||
160 | TILE_OPC_LWADD_NA, | ||
161 | TILE_OPC_LWADD_NA_SN, | ||
162 | TILE_OPC_MAXB_U, | ||
163 | TILE_OPC_MAXB_U_SN, | ||
164 | TILE_OPC_MAXH, | ||
165 | TILE_OPC_MAXH_SN, | ||
166 | TILE_OPC_MAXIB_U, | ||
167 | TILE_OPC_MAXIB_U_SN, | ||
168 | TILE_OPC_MAXIH, | ||
169 | TILE_OPC_MAXIH_SN, | ||
170 | TILE_OPC_MF, | ||
171 | TILE_OPC_MFSPR, | ||
172 | TILE_OPC_MINB_U, | ||
173 | TILE_OPC_MINB_U_SN, | ||
174 | TILE_OPC_MINH, | ||
175 | TILE_OPC_MINH_SN, | ||
176 | TILE_OPC_MINIB_U, | ||
177 | TILE_OPC_MINIB_U_SN, | ||
178 | TILE_OPC_MINIH, | ||
179 | TILE_OPC_MINIH_SN, | ||
180 | TILE_OPC_MM, | ||
181 | TILE_OPC_MNZ, | ||
182 | TILE_OPC_MNZ_SN, | ||
183 | TILE_OPC_MNZB, | ||
184 | TILE_OPC_MNZB_SN, | ||
185 | TILE_OPC_MNZH, | ||
186 | TILE_OPC_MNZH_SN, | ||
187 | TILE_OPC_MTSPR, | ||
188 | TILE_OPC_MULHH_SS, | ||
189 | TILE_OPC_MULHH_SS_SN, | ||
190 | TILE_OPC_MULHH_SU, | ||
191 | TILE_OPC_MULHH_SU_SN, | ||
192 | TILE_OPC_MULHH_UU, | ||
193 | TILE_OPC_MULHH_UU_SN, | ||
194 | TILE_OPC_MULHHA_SS, | ||
195 | TILE_OPC_MULHHA_SS_SN, | ||
196 | TILE_OPC_MULHHA_SU, | ||
197 | TILE_OPC_MULHHA_SU_SN, | ||
198 | TILE_OPC_MULHHA_UU, | ||
199 | TILE_OPC_MULHHA_UU_SN, | ||
200 | TILE_OPC_MULHHSA_UU, | ||
201 | TILE_OPC_MULHHSA_UU_SN, | ||
202 | TILE_OPC_MULHL_SS, | ||
203 | TILE_OPC_MULHL_SS_SN, | ||
204 | TILE_OPC_MULHL_SU, | ||
205 | TILE_OPC_MULHL_SU_SN, | ||
206 | TILE_OPC_MULHL_US, | ||
207 | TILE_OPC_MULHL_US_SN, | ||
208 | TILE_OPC_MULHL_UU, | ||
209 | TILE_OPC_MULHL_UU_SN, | ||
210 | TILE_OPC_MULHLA_SS, | ||
211 | TILE_OPC_MULHLA_SS_SN, | ||
212 | TILE_OPC_MULHLA_SU, | ||
213 | TILE_OPC_MULHLA_SU_SN, | ||
214 | TILE_OPC_MULHLA_US, | ||
215 | TILE_OPC_MULHLA_US_SN, | ||
216 | TILE_OPC_MULHLA_UU, | ||
217 | TILE_OPC_MULHLA_UU_SN, | ||
218 | TILE_OPC_MULHLSA_UU, | ||
219 | TILE_OPC_MULHLSA_UU_SN, | ||
220 | TILE_OPC_MULLL_SS, | ||
221 | TILE_OPC_MULLL_SS_SN, | ||
222 | TILE_OPC_MULLL_SU, | ||
223 | TILE_OPC_MULLL_SU_SN, | ||
224 | TILE_OPC_MULLL_UU, | ||
225 | TILE_OPC_MULLL_UU_SN, | ||
226 | TILE_OPC_MULLLA_SS, | ||
227 | TILE_OPC_MULLLA_SS_SN, | ||
228 | TILE_OPC_MULLLA_SU, | ||
229 | TILE_OPC_MULLLA_SU_SN, | ||
230 | TILE_OPC_MULLLA_UU, | ||
231 | TILE_OPC_MULLLA_UU_SN, | ||
232 | TILE_OPC_MULLLSA_UU, | ||
233 | TILE_OPC_MULLLSA_UU_SN, | ||
234 | TILE_OPC_MVNZ, | ||
235 | TILE_OPC_MVNZ_SN, | ||
236 | TILE_OPC_MVZ, | ||
237 | TILE_OPC_MVZ_SN, | ||
238 | TILE_OPC_MZ, | ||
239 | TILE_OPC_MZ_SN, | ||
240 | TILE_OPC_MZB, | ||
241 | TILE_OPC_MZB_SN, | ||
242 | TILE_OPC_MZH, | ||
243 | TILE_OPC_MZH_SN, | ||
244 | TILE_OPC_NAP, | ||
245 | TILE_OPC_NOP, | ||
246 | TILE_OPC_NOR, | ||
247 | TILE_OPC_NOR_SN, | ||
248 | TILE_OPC_OR, | ||
249 | TILE_OPC_OR_SN, | ||
250 | TILE_OPC_ORI, | ||
251 | TILE_OPC_ORI_SN, | ||
252 | TILE_OPC_PACKBS_U, | ||
253 | TILE_OPC_PACKBS_U_SN, | ||
254 | TILE_OPC_PACKHB, | ||
255 | TILE_OPC_PACKHB_SN, | ||
256 | TILE_OPC_PACKHS, | ||
257 | TILE_OPC_PACKHS_SN, | ||
258 | TILE_OPC_PACKLB, | ||
259 | TILE_OPC_PACKLB_SN, | ||
260 | TILE_OPC_PCNT, | ||
261 | TILE_OPC_PCNT_SN, | ||
262 | TILE_OPC_RL, | ||
263 | TILE_OPC_RL_SN, | ||
264 | TILE_OPC_RLI, | ||
265 | TILE_OPC_RLI_SN, | ||
266 | TILE_OPC_S1A, | ||
267 | TILE_OPC_S1A_SN, | ||
268 | TILE_OPC_S2A, | ||
269 | TILE_OPC_S2A_SN, | ||
270 | TILE_OPC_S3A, | ||
271 | TILE_OPC_S3A_SN, | ||
272 | TILE_OPC_SADAB_U, | ||
273 | TILE_OPC_SADAB_U_SN, | ||
274 | TILE_OPC_SADAH, | ||
275 | TILE_OPC_SADAH_SN, | ||
276 | TILE_OPC_SADAH_U, | ||
277 | TILE_OPC_SADAH_U_SN, | ||
278 | TILE_OPC_SADB_U, | ||
279 | TILE_OPC_SADB_U_SN, | ||
280 | TILE_OPC_SADH, | ||
281 | TILE_OPC_SADH_SN, | ||
282 | TILE_OPC_SADH_U, | ||
283 | TILE_OPC_SADH_U_SN, | ||
284 | TILE_OPC_SB, | ||
285 | TILE_OPC_SBADD, | ||
286 | TILE_OPC_SEQ, | ||
287 | TILE_OPC_SEQ_SN, | ||
288 | TILE_OPC_SEQB, | ||
289 | TILE_OPC_SEQB_SN, | ||
290 | TILE_OPC_SEQH, | ||
291 | TILE_OPC_SEQH_SN, | ||
292 | TILE_OPC_SEQI, | ||
293 | TILE_OPC_SEQI_SN, | ||
294 | TILE_OPC_SEQIB, | ||
295 | TILE_OPC_SEQIB_SN, | ||
296 | TILE_OPC_SEQIH, | ||
297 | TILE_OPC_SEQIH_SN, | ||
298 | TILE_OPC_SH, | ||
299 | TILE_OPC_SHADD, | ||
300 | TILE_OPC_SHL, | ||
301 | TILE_OPC_SHL_SN, | ||
302 | TILE_OPC_SHLB, | ||
303 | TILE_OPC_SHLB_SN, | ||
304 | TILE_OPC_SHLH, | ||
305 | TILE_OPC_SHLH_SN, | ||
306 | TILE_OPC_SHLI, | ||
307 | TILE_OPC_SHLI_SN, | ||
308 | TILE_OPC_SHLIB, | ||
309 | TILE_OPC_SHLIB_SN, | ||
310 | TILE_OPC_SHLIH, | ||
311 | TILE_OPC_SHLIH_SN, | ||
312 | TILE_OPC_SHR, | ||
313 | TILE_OPC_SHR_SN, | ||
314 | TILE_OPC_SHRB, | ||
315 | TILE_OPC_SHRB_SN, | ||
316 | TILE_OPC_SHRH, | ||
317 | TILE_OPC_SHRH_SN, | ||
318 | TILE_OPC_SHRI, | ||
319 | TILE_OPC_SHRI_SN, | ||
320 | TILE_OPC_SHRIB, | ||
321 | TILE_OPC_SHRIB_SN, | ||
322 | TILE_OPC_SHRIH, | ||
323 | TILE_OPC_SHRIH_SN, | ||
324 | TILE_OPC_SLT, | ||
325 | TILE_OPC_SLT_SN, | ||
326 | TILE_OPC_SLT_U, | ||
327 | TILE_OPC_SLT_U_SN, | ||
328 | TILE_OPC_SLTB, | ||
329 | TILE_OPC_SLTB_SN, | ||
330 | TILE_OPC_SLTB_U, | ||
331 | TILE_OPC_SLTB_U_SN, | ||
332 | TILE_OPC_SLTE, | ||
333 | TILE_OPC_SLTE_SN, | ||
334 | TILE_OPC_SLTE_U, | ||
335 | TILE_OPC_SLTE_U_SN, | ||
336 | TILE_OPC_SLTEB, | ||
337 | TILE_OPC_SLTEB_SN, | ||
338 | TILE_OPC_SLTEB_U, | ||
339 | TILE_OPC_SLTEB_U_SN, | ||
340 | TILE_OPC_SLTEH, | ||
341 | TILE_OPC_SLTEH_SN, | ||
342 | TILE_OPC_SLTEH_U, | ||
343 | TILE_OPC_SLTEH_U_SN, | ||
344 | TILE_OPC_SLTH, | ||
345 | TILE_OPC_SLTH_SN, | ||
346 | TILE_OPC_SLTH_U, | ||
347 | TILE_OPC_SLTH_U_SN, | ||
348 | TILE_OPC_SLTI, | ||
349 | TILE_OPC_SLTI_SN, | ||
350 | TILE_OPC_SLTI_U, | ||
351 | TILE_OPC_SLTI_U_SN, | ||
352 | TILE_OPC_SLTIB, | ||
353 | TILE_OPC_SLTIB_SN, | ||
354 | TILE_OPC_SLTIB_U, | ||
355 | TILE_OPC_SLTIB_U_SN, | ||
356 | TILE_OPC_SLTIH, | ||
357 | TILE_OPC_SLTIH_SN, | ||
358 | TILE_OPC_SLTIH_U, | ||
359 | TILE_OPC_SLTIH_U_SN, | ||
360 | TILE_OPC_SNE, | ||
361 | TILE_OPC_SNE_SN, | ||
362 | TILE_OPC_SNEB, | ||
363 | TILE_OPC_SNEB_SN, | ||
364 | TILE_OPC_SNEH, | ||
365 | TILE_OPC_SNEH_SN, | ||
366 | TILE_OPC_SRA, | ||
367 | TILE_OPC_SRA_SN, | ||
368 | TILE_OPC_SRAB, | ||
369 | TILE_OPC_SRAB_SN, | ||
370 | TILE_OPC_SRAH, | ||
371 | TILE_OPC_SRAH_SN, | ||
372 | TILE_OPC_SRAI, | ||
373 | TILE_OPC_SRAI_SN, | ||
374 | TILE_OPC_SRAIB, | ||
375 | TILE_OPC_SRAIB_SN, | ||
376 | TILE_OPC_SRAIH, | ||
377 | TILE_OPC_SRAIH_SN, | ||
378 | TILE_OPC_SUB, | ||
379 | TILE_OPC_SUB_SN, | ||
380 | TILE_OPC_SUBB, | ||
381 | TILE_OPC_SUBB_SN, | ||
382 | TILE_OPC_SUBBS_U, | ||
383 | TILE_OPC_SUBBS_U_SN, | ||
384 | TILE_OPC_SUBH, | ||
385 | TILE_OPC_SUBH_SN, | ||
386 | TILE_OPC_SUBHS, | ||
387 | TILE_OPC_SUBHS_SN, | ||
388 | TILE_OPC_SUBS, | ||
389 | TILE_OPC_SUBS_SN, | ||
390 | TILE_OPC_SW, | ||
391 | TILE_OPC_SWADD, | ||
392 | TILE_OPC_SWINT0, | ||
393 | TILE_OPC_SWINT1, | ||
394 | TILE_OPC_SWINT2, | ||
395 | TILE_OPC_SWINT3, | ||
396 | TILE_OPC_TBLIDXB0, | ||
397 | TILE_OPC_TBLIDXB0_SN, | ||
398 | TILE_OPC_TBLIDXB1, | ||
399 | TILE_OPC_TBLIDXB1_SN, | ||
400 | TILE_OPC_TBLIDXB2, | ||
401 | TILE_OPC_TBLIDXB2_SN, | ||
402 | TILE_OPC_TBLIDXB3, | ||
403 | TILE_OPC_TBLIDXB3_SN, | ||
404 | TILE_OPC_TNS, | ||
405 | TILE_OPC_TNS_SN, | ||
406 | TILE_OPC_WH64, | ||
407 | TILE_OPC_XOR, | ||
408 | TILE_OPC_XOR_SN, | ||
409 | TILE_OPC_XORI, | ||
410 | TILE_OPC_XORI_SN, | ||
411 | TILE_OPC_NONE | ||
412 | } tile_mnemonic; | ||
413 | |||
414 | /* 64-bit pattern for a { bpt ; nop } bundle. */ | ||
415 | #define TILE_BPT_BUNDLE 0x400b3cae70166000ULL | ||
416 | |||
417 | |||
418 | #define TILE_ELF_MACHINE_CODE EM_TILEPRO | ||
419 | |||
420 | #define TILE_ELF_NAME "elf32-tilepro" | ||
421 | |||
422 | enum | ||
423 | { | ||
424 | TILE_SN_MAX_OPERANDS = 6 /* route */ | ||
425 | }; | ||
426 | |||
427 | typedef enum | ||
428 | { | ||
429 | TILE_SN_OPC_BZ, | ||
430 | TILE_SN_OPC_BNZ, | ||
431 | TILE_SN_OPC_JRR, | ||
432 | TILE_SN_OPC_FNOP, | ||
433 | TILE_SN_OPC_BLZ, | ||
434 | TILE_SN_OPC_NOP, | ||
435 | TILE_SN_OPC_MOVEI, | ||
436 | TILE_SN_OPC_MOVE, | ||
437 | TILE_SN_OPC_BGEZ, | ||
438 | TILE_SN_OPC_JR, | ||
439 | TILE_SN_OPC_BLEZ, | ||
440 | TILE_SN_OPC_BBNS, | ||
441 | TILE_SN_OPC_JALRR, | ||
442 | TILE_SN_OPC_BPT, | ||
443 | TILE_SN_OPC_JALR, | ||
444 | TILE_SN_OPC_SHR1, | ||
445 | TILE_SN_OPC_BGZ, | ||
446 | TILE_SN_OPC_BBS, | ||
447 | TILE_SN_OPC_SHL8II, | ||
448 | TILE_SN_OPC_ADDI, | ||
449 | TILE_SN_OPC_HALT, | ||
450 | TILE_SN_OPC_ROUTE, | ||
451 | TILE_SN_OPC_NONE | ||
452 | } tile_sn_mnemonic; | ||
453 | |||
454 | extern const unsigned char tile_sn_route_encode[6 * 6 * 6]; | ||
455 | extern const signed char tile_sn_route_decode[256][3]; | ||
456 | extern const char tile_sn_direction_names[6][5]; | ||
457 | extern const signed char tile_sn_dest_map[6][6]; | ||
458 | |||
459 | |||
460 | static __inline unsigned int | ||
461 | get_BrOff_SN(tile_bundle_bits num) | ||
462 | { | ||
463 | const unsigned int n = (unsigned int)num; | ||
464 | return (((n >> 0)) & 0x3ff); | ||
465 | } | ||
466 | |||
467 | static __inline unsigned int | ||
468 | get_BrOff_X1(tile_bundle_bits n) | ||
469 | { | ||
470 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
471 | (((unsigned int)(n >> 20)) & 0x00018000); | ||
472 | } | ||
473 | |||
474 | static __inline unsigned int | ||
475 | get_BrType_X1(tile_bundle_bits n) | ||
476 | { | ||
477 | return (((unsigned int)(n >> 31)) & 0xf); | ||
478 | } | ||
479 | |||
480 | static __inline unsigned int | ||
481 | get_Dest_Imm8_X1(tile_bundle_bits n) | ||
482 | { | ||
483 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
484 | (((unsigned int)(n >> 43)) & 0x000000c0); | ||
485 | } | ||
486 | |||
487 | static __inline unsigned int | ||
488 | get_Dest_SN(tile_bundle_bits num) | ||
489 | { | ||
490 | const unsigned int n = (unsigned int)num; | ||
491 | return (((n >> 2)) & 0x3); | ||
492 | } | ||
493 | |||
494 | static __inline unsigned int | ||
495 | get_Dest_X0(tile_bundle_bits num) | ||
496 | { | ||
497 | const unsigned int n = (unsigned int)num; | ||
498 | return (((n >> 0)) & 0x3f); | ||
499 | } | ||
500 | |||
501 | static __inline unsigned int | ||
502 | get_Dest_X1(tile_bundle_bits n) | ||
503 | { | ||
504 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
505 | } | ||
506 | |||
507 | static __inline unsigned int | ||
508 | get_Dest_Y0(tile_bundle_bits num) | ||
509 | { | ||
510 | const unsigned int n = (unsigned int)num; | ||
511 | return (((n >> 0)) & 0x3f); | ||
512 | } | ||
513 | |||
514 | static __inline unsigned int | ||
515 | get_Dest_Y1(tile_bundle_bits n) | ||
516 | { | ||
517 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
518 | } | ||
519 | |||
520 | static __inline unsigned int | ||
521 | get_Imm16_X0(tile_bundle_bits num) | ||
522 | { | ||
523 | const unsigned int n = (unsigned int)num; | ||
524 | return (((n >> 12)) & 0xffff); | ||
525 | } | ||
526 | |||
527 | static __inline unsigned int | ||
528 | get_Imm16_X1(tile_bundle_bits n) | ||
529 | { | ||
530 | return (((unsigned int)(n >> 43)) & 0xffff); | ||
531 | } | ||
532 | |||
533 | static __inline unsigned int | ||
534 | get_Imm8_SN(tile_bundle_bits num) | ||
535 | { | ||
536 | const unsigned int n = (unsigned int)num; | ||
537 | return (((n >> 0)) & 0xff); | ||
538 | } | ||
539 | |||
540 | static __inline unsigned int | ||
541 | get_Imm8_X0(tile_bundle_bits num) | ||
542 | { | ||
543 | const unsigned int n = (unsigned int)num; | ||
544 | return (((n >> 12)) & 0xff); | ||
545 | } | ||
546 | |||
547 | static __inline unsigned int | ||
548 | get_Imm8_X1(tile_bundle_bits n) | ||
549 | { | ||
550 | return (((unsigned int)(n >> 43)) & 0xff); | ||
551 | } | ||
552 | |||
553 | static __inline unsigned int | ||
554 | get_Imm8_Y0(tile_bundle_bits num) | ||
555 | { | ||
556 | const unsigned int n = (unsigned int)num; | ||
557 | return (((n >> 12)) & 0xff); | ||
558 | } | ||
559 | |||
560 | static __inline unsigned int | ||
561 | get_Imm8_Y1(tile_bundle_bits n) | ||
562 | { | ||
563 | return (((unsigned int)(n >> 43)) & 0xff); | ||
564 | } | ||
565 | |||
566 | static __inline unsigned int | ||
567 | get_ImmOpcodeExtension_X0(tile_bundle_bits num) | ||
568 | { | ||
569 | const unsigned int n = (unsigned int)num; | ||
570 | return (((n >> 20)) & 0x7f); | ||
571 | } | ||
572 | |||
573 | static __inline unsigned int | ||
574 | get_ImmOpcodeExtension_X1(tile_bundle_bits n) | ||
575 | { | ||
576 | return (((unsigned int)(n >> 51)) & 0x7f); | ||
577 | } | ||
578 | |||
579 | static __inline unsigned int | ||
580 | get_ImmRROpcodeExtension_SN(tile_bundle_bits num) | ||
581 | { | ||
582 | const unsigned int n = (unsigned int)num; | ||
583 | return (((n >> 8)) & 0x3); | ||
584 | } | ||
585 | |||
586 | static __inline unsigned int | ||
587 | get_JOffLong_X1(tile_bundle_bits n) | ||
588 | { | ||
589 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
590 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
591 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
592 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
593 | (((unsigned int)(n >> 31)) & 0x18000000); | ||
594 | } | ||
595 | |||
596 | static __inline unsigned int | ||
597 | get_JOff_X1(tile_bundle_bits n) | ||
598 | { | ||
599 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
600 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
601 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
602 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
603 | (((unsigned int)(n >> 31)) & 0x08000000); | ||
604 | } | ||
605 | |||
606 | static __inline unsigned int | ||
607 | get_MF_Imm15_X1(tile_bundle_bits n) | ||
608 | { | ||
609 | return (((unsigned int)(n >> 37)) & 0x00003fff) | | ||
610 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
611 | } | ||
612 | |||
613 | static __inline unsigned int | ||
614 | get_MMEnd_X0(tile_bundle_bits num) | ||
615 | { | ||
616 | const unsigned int n = (unsigned int)num; | ||
617 | return (((n >> 18)) & 0x1f); | ||
618 | } | ||
619 | |||
620 | static __inline unsigned int | ||
621 | get_MMEnd_X1(tile_bundle_bits n) | ||
622 | { | ||
623 | return (((unsigned int)(n >> 49)) & 0x1f); | ||
624 | } | ||
625 | |||
626 | static __inline unsigned int | ||
627 | get_MMStart_X0(tile_bundle_bits num) | ||
628 | { | ||
629 | const unsigned int n = (unsigned int)num; | ||
630 | return (((n >> 23)) & 0x1f); | ||
631 | } | ||
632 | |||
633 | static __inline unsigned int | ||
634 | get_MMStart_X1(tile_bundle_bits n) | ||
635 | { | ||
636 | return (((unsigned int)(n >> 54)) & 0x1f); | ||
637 | } | ||
638 | |||
639 | static __inline unsigned int | ||
640 | get_MT_Imm15_X1(tile_bundle_bits n) | ||
641 | { | ||
642 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
643 | (((unsigned int)(n >> 37)) & 0x00003fc0) | | ||
644 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
645 | } | ||
646 | |||
647 | static __inline unsigned int | ||
648 | get_Mode(tile_bundle_bits n) | ||
649 | { | ||
650 | return (((unsigned int)(n >> 63)) & 0x1); | ||
651 | } | ||
652 | |||
653 | static __inline unsigned int | ||
654 | get_NoRegOpcodeExtension_SN(tile_bundle_bits num) | ||
655 | { | ||
656 | const unsigned int n = (unsigned int)num; | ||
657 | return (((n >> 0)) & 0xf); | ||
658 | } | ||
659 | |||
660 | static __inline unsigned int | ||
661 | get_Opcode_SN(tile_bundle_bits num) | ||
662 | { | ||
663 | const unsigned int n = (unsigned int)num; | ||
664 | return (((n >> 10)) & 0x3f); | ||
665 | } | ||
666 | |||
667 | static __inline unsigned int | ||
668 | get_Opcode_X0(tile_bundle_bits num) | ||
669 | { | ||
670 | const unsigned int n = (unsigned int)num; | ||
671 | return (((n >> 28)) & 0x7); | ||
672 | } | ||
673 | |||
674 | static __inline unsigned int | ||
675 | get_Opcode_X1(tile_bundle_bits n) | ||
676 | { | ||
677 | return (((unsigned int)(n >> 59)) & 0xf); | ||
678 | } | ||
679 | |||
680 | static __inline unsigned int | ||
681 | get_Opcode_Y0(tile_bundle_bits num) | ||
682 | { | ||
683 | const unsigned int n = (unsigned int)num; | ||
684 | return (((n >> 27)) & 0xf); | ||
685 | } | ||
686 | |||
687 | static __inline unsigned int | ||
688 | get_Opcode_Y1(tile_bundle_bits n) | ||
689 | { | ||
690 | return (((unsigned int)(n >> 59)) & 0xf); | ||
691 | } | ||
692 | |||
693 | static __inline unsigned int | ||
694 | get_Opcode_Y2(tile_bundle_bits n) | ||
695 | { | ||
696 | return (((unsigned int)(n >> 56)) & 0x7); | ||
697 | } | ||
698 | |||
699 | static __inline unsigned int | ||
700 | get_RROpcodeExtension_SN(tile_bundle_bits num) | ||
701 | { | ||
702 | const unsigned int n = (unsigned int)num; | ||
703 | return (((n >> 4)) & 0xf); | ||
704 | } | ||
705 | |||
706 | static __inline unsigned int | ||
707 | get_RRROpcodeExtension_X0(tile_bundle_bits num) | ||
708 | { | ||
709 | const unsigned int n = (unsigned int)num; | ||
710 | return (((n >> 18)) & 0x1ff); | ||
711 | } | ||
712 | |||
713 | static __inline unsigned int | ||
714 | get_RRROpcodeExtension_X1(tile_bundle_bits n) | ||
715 | { | ||
716 | return (((unsigned int)(n >> 49)) & 0x1ff); | ||
717 | } | ||
718 | |||
719 | static __inline unsigned int | ||
720 | get_RRROpcodeExtension_Y0(tile_bundle_bits num) | ||
721 | { | ||
722 | const unsigned int n = (unsigned int)num; | ||
723 | return (((n >> 18)) & 0x3); | ||
724 | } | ||
725 | |||
726 | static __inline unsigned int | ||
727 | get_RRROpcodeExtension_Y1(tile_bundle_bits n) | ||
728 | { | ||
729 | return (((unsigned int)(n >> 49)) & 0x3); | ||
730 | } | ||
731 | |||
732 | static __inline unsigned int | ||
733 | get_RouteOpcodeExtension_SN(tile_bundle_bits num) | ||
734 | { | ||
735 | const unsigned int n = (unsigned int)num; | ||
736 | return (((n >> 0)) & 0x3ff); | ||
737 | } | ||
738 | |||
739 | static __inline unsigned int | ||
740 | get_S_X0(tile_bundle_bits num) | ||
741 | { | ||
742 | const unsigned int n = (unsigned int)num; | ||
743 | return (((n >> 27)) & 0x1); | ||
744 | } | ||
745 | |||
746 | static __inline unsigned int | ||
747 | get_S_X1(tile_bundle_bits n) | ||
748 | { | ||
749 | return (((unsigned int)(n >> 58)) & 0x1); | ||
750 | } | ||
751 | |||
752 | static __inline unsigned int | ||
753 | get_ShAmt_X0(tile_bundle_bits num) | ||
754 | { | ||
755 | const unsigned int n = (unsigned int)num; | ||
756 | return (((n >> 12)) & 0x1f); | ||
757 | } | ||
758 | |||
759 | static __inline unsigned int | ||
760 | get_ShAmt_X1(tile_bundle_bits n) | ||
761 | { | ||
762 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
763 | } | ||
764 | |||
765 | static __inline unsigned int | ||
766 | get_ShAmt_Y0(tile_bundle_bits num) | ||
767 | { | ||
768 | const unsigned int n = (unsigned int)num; | ||
769 | return (((n >> 12)) & 0x1f); | ||
770 | } | ||
771 | |||
772 | static __inline unsigned int | ||
773 | get_ShAmt_Y1(tile_bundle_bits n) | ||
774 | { | ||
775 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
776 | } | ||
777 | |||
778 | static __inline unsigned int | ||
779 | get_SrcA_X0(tile_bundle_bits num) | ||
780 | { | ||
781 | const unsigned int n = (unsigned int)num; | ||
782 | return (((n >> 6)) & 0x3f); | ||
783 | } | ||
784 | |||
785 | static __inline unsigned int | ||
786 | get_SrcA_X1(tile_bundle_bits n) | ||
787 | { | ||
788 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
789 | } | ||
790 | |||
791 | static __inline unsigned int | ||
792 | get_SrcA_Y0(tile_bundle_bits num) | ||
793 | { | ||
794 | const unsigned int n = (unsigned int)num; | ||
795 | return (((n >> 6)) & 0x3f); | ||
796 | } | ||
797 | |||
798 | static __inline unsigned int | ||
799 | get_SrcA_Y1(tile_bundle_bits n) | ||
800 | { | ||
801 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
802 | } | ||
803 | |||
804 | static __inline unsigned int | ||
805 | get_SrcA_Y2(tile_bundle_bits n) | ||
806 | { | ||
807 | return (((n >> 26)) & 0x00000001) | | ||
808 | (((unsigned int)(n >> 50)) & 0x0000003e); | ||
809 | } | ||
810 | |||
811 | static __inline unsigned int | ||
812 | get_SrcBDest_Y2(tile_bundle_bits num) | ||
813 | { | ||
814 | const unsigned int n = (unsigned int)num; | ||
815 | return (((n >> 20)) & 0x3f); | ||
816 | } | ||
817 | |||
818 | static __inline unsigned int | ||
819 | get_SrcB_X0(tile_bundle_bits num) | ||
820 | { | ||
821 | const unsigned int n = (unsigned int)num; | ||
822 | return (((n >> 12)) & 0x3f); | ||
823 | } | ||
824 | |||
825 | static __inline unsigned int | ||
826 | get_SrcB_X1(tile_bundle_bits n) | ||
827 | { | ||
828 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
829 | } | ||
830 | |||
831 | static __inline unsigned int | ||
832 | get_SrcB_Y0(tile_bundle_bits num) | ||
833 | { | ||
834 | const unsigned int n = (unsigned int)num; | ||
835 | return (((n >> 12)) & 0x3f); | ||
836 | } | ||
837 | |||
838 | static __inline unsigned int | ||
839 | get_SrcB_Y1(tile_bundle_bits n) | ||
840 | { | ||
841 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
842 | } | ||
843 | |||
844 | static __inline unsigned int | ||
845 | get_Src_SN(tile_bundle_bits num) | ||
846 | { | ||
847 | const unsigned int n = (unsigned int)num; | ||
848 | return (((n >> 0)) & 0x3); | ||
849 | } | ||
850 | |||
851 | static __inline unsigned int | ||
852 | get_UnOpcodeExtension_X0(tile_bundle_bits num) | ||
853 | { | ||
854 | const unsigned int n = (unsigned int)num; | ||
855 | return (((n >> 12)) & 0x1f); | ||
856 | } | ||
857 | |||
858 | static __inline unsigned int | ||
859 | get_UnOpcodeExtension_X1(tile_bundle_bits n) | ||
860 | { | ||
861 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
862 | } | ||
863 | |||
864 | static __inline unsigned int | ||
865 | get_UnOpcodeExtension_Y0(tile_bundle_bits num) | ||
866 | { | ||
867 | const unsigned int n = (unsigned int)num; | ||
868 | return (((n >> 12)) & 0x1f); | ||
869 | } | ||
870 | |||
871 | static __inline unsigned int | ||
872 | get_UnOpcodeExtension_Y1(tile_bundle_bits n) | ||
873 | { | ||
874 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
875 | } | ||
876 | |||
877 | static __inline unsigned int | ||
878 | get_UnShOpcodeExtension_X0(tile_bundle_bits num) | ||
879 | { | ||
880 | const unsigned int n = (unsigned int)num; | ||
881 | return (((n >> 17)) & 0x3ff); | ||
882 | } | ||
883 | |||
884 | static __inline unsigned int | ||
885 | get_UnShOpcodeExtension_X1(tile_bundle_bits n) | ||
886 | { | ||
887 | return (((unsigned int)(n >> 48)) & 0x3ff); | ||
888 | } | ||
889 | |||
890 | static __inline unsigned int | ||
891 | get_UnShOpcodeExtension_Y0(tile_bundle_bits num) | ||
892 | { | ||
893 | const unsigned int n = (unsigned int)num; | ||
894 | return (((n >> 17)) & 0x7); | ||
895 | } | ||
896 | |||
897 | static __inline unsigned int | ||
898 | get_UnShOpcodeExtension_Y1(tile_bundle_bits n) | ||
899 | { | ||
900 | return (((unsigned int)(n >> 48)) & 0x7); | ||
901 | } | ||
902 | |||
903 | |||
904 | static __inline int | ||
905 | sign_extend(int n, int num_bits) | ||
906 | { | ||
907 | int shift = (int)(sizeof(int) * 8 - num_bits); | ||
908 | return (n << shift) >> shift; | ||
909 | } | ||
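As a usage note, the get_* accessors above return raw unsigned fields; combining one with sign_extend() recovers a signed immediate. The wrapper below is illustrative only and is not part of this header.

static __inline int
get_signed_Imm8_X1(tile_bundle_bits bundle)
{
	/* Imm8_X1 is an 8-bit field; sign-extend it to a C int. */
	return sign_extend((int)get_Imm8_X1(bundle), 8);
}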
910 | |||
911 | |||
912 | |||
913 | static __inline tile_bundle_bits | ||
914 | create_BrOff_SN(int num) | ||
915 | { | ||
916 | const unsigned int n = (unsigned int)num; | ||
917 | return ((n & 0x3ff) << 0); | ||
918 | } | ||
919 | |||
920 | static __inline tile_bundle_bits | ||
921 | create_BrOff_X1(int num) | ||
922 | { | ||
923 | const unsigned int n = (unsigned int)num; | ||
924 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
925 | (((tile_bundle_bits)(n & 0x00018000)) << 20); | ||
926 | } | ||
927 | |||
928 | static __inline tile_bundle_bits | ||
929 | create_BrType_X1(int num) | ||
930 | { | ||
931 | const unsigned int n = (unsigned int)num; | ||
932 | return (((tile_bundle_bits)(n & 0xf)) << 31); | ||
933 | } | ||
934 | |||
935 | static __inline tile_bundle_bits | ||
936 | create_Dest_Imm8_X1(int num) | ||
937 | { | ||
938 | const unsigned int n = (unsigned int)num; | ||
939 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
940 | (((tile_bundle_bits)(n & 0x000000c0)) << 43); | ||
941 | } | ||
942 | |||
943 | static __inline tile_bundle_bits | ||
944 | create_Dest_SN(int num) | ||
945 | { | ||
946 | const unsigned int n = (unsigned int)num; | ||
947 | return ((n & 0x3) << 2); | ||
948 | } | ||
949 | |||
950 | static __inline tile_bundle_bits | ||
951 | create_Dest_X0(int num) | ||
952 | { | ||
953 | const unsigned int n = (unsigned int)num; | ||
954 | return ((n & 0x3f) << 0); | ||
955 | } | ||
956 | |||
957 | static __inline tile_bundle_bits | ||
958 | create_Dest_X1(int num) | ||
959 | { | ||
960 | const unsigned int n = (unsigned int)num; | ||
961 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
962 | } | ||
963 | |||
964 | static __inline tile_bundle_bits | ||
965 | create_Dest_Y0(int num) | ||
966 | { | ||
967 | const unsigned int n = (unsigned int)num; | ||
968 | return ((n & 0x3f) << 0); | ||
969 | } | ||
970 | |||
971 | static __inline tile_bundle_bits | ||
972 | create_Dest_Y1(int num) | ||
973 | { | ||
974 | const unsigned int n = (unsigned int)num; | ||
975 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
976 | } | ||
977 | |||
978 | static __inline tile_bundle_bits | ||
979 | create_Imm16_X0(int num) | ||
980 | { | ||
981 | const unsigned int n = (unsigned int)num; | ||
982 | return ((n & 0xffff) << 12); | ||
983 | } | ||
984 | |||
985 | static __inline tile_bundle_bits | ||
986 | create_Imm16_X1(int num) | ||
987 | { | ||
988 | const unsigned int n = (unsigned int)num; | ||
989 | return (((tile_bundle_bits)(n & 0xffff)) << 43); | ||
990 | } | ||
991 | |||
992 | static __inline tile_bundle_bits | ||
993 | create_Imm8_SN(int num) | ||
994 | { | ||
995 | const unsigned int n = (unsigned int)num; | ||
996 | return ((n & 0xff) << 0); | ||
997 | } | ||
998 | |||
999 | static __inline tile_bundle_bits | ||
1000 | create_Imm8_X0(int num) | ||
1001 | { | ||
1002 | const unsigned int n = (unsigned int)num; | ||
1003 | return ((n & 0xff) << 12); | ||
1004 | } | ||
1005 | |||
1006 | static __inline tile_bundle_bits | ||
1007 | create_Imm8_X1(int num) | ||
1008 | { | ||
1009 | const unsigned int n = (unsigned int)num; | ||
1010 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
1011 | } | ||
1012 | |||
1013 | static __inline tile_bundle_bits | ||
1014 | create_Imm8_Y0(int num) | ||
1015 | { | ||
1016 | const unsigned int n = (unsigned int)num; | ||
1017 | return ((n & 0xff) << 12); | ||
1018 | } | ||
1019 | |||
1020 | static __inline tile_bundle_bits | ||
1021 | create_Imm8_Y1(int num) | ||
1022 | { | ||
1023 | const unsigned int n = (unsigned int)num; | ||
1024 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
1025 | } | ||
1026 | |||
1027 | static __inline tile_bundle_bits | ||
1028 | create_ImmOpcodeExtension_X0(int num) | ||
1029 | { | ||
1030 | const unsigned int n = (unsigned int)num; | ||
1031 | return ((n & 0x7f) << 20); | ||
1032 | } | ||
1033 | |||
1034 | static __inline tile_bundle_bits | ||
1035 | create_ImmOpcodeExtension_X1(int num) | ||
1036 | { | ||
1037 | const unsigned int n = (unsigned int)num; | ||
1038 | return (((tile_bundle_bits)(n & 0x7f)) << 51); | ||
1039 | } | ||
1040 | |||
1041 | static __inline tile_bundle_bits | ||
1042 | create_ImmRROpcodeExtension_SN(int num) | ||
1043 | { | ||
1044 | const unsigned int n = (unsigned int)num; | ||
1045 | return ((n & 0x3) << 8); | ||
1046 | } | ||
1047 | |||
1048 | static __inline tile_bundle_bits | ||
1049 | create_JOffLong_X1(int num) | ||
1050 | { | ||
1051 | const unsigned int n = (unsigned int)num; | ||
1052 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1053 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1054 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1055 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1056 | (((tile_bundle_bits)(n & 0x18000000)) << 31); | ||
1057 | } | ||
1058 | |||
1059 | static __inline tile_bundle_bits | ||
1060 | create_JOff_X1(int num) | ||
1061 | { | ||
1062 | const unsigned int n = (unsigned int)num; | ||
1063 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1064 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1065 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1066 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1067 | (((tile_bundle_bits)(n & 0x08000000)) << 31); | ||
1068 | } | ||
1069 | |||
1070 | static __inline tile_bundle_bits | ||
1071 | create_MF_Imm15_X1(int num) | ||
1072 | { | ||
1073 | const unsigned int n = (unsigned int)num; | ||
1074 | return (((tile_bundle_bits)(n & 0x00003fff)) << 37) | | ||
1075 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1076 | } | ||
1077 | |||
1078 | static __inline tile_bundle_bits | ||
1079 | create_MMEnd_X0(int num) | ||
1080 | { | ||
1081 | const unsigned int n = (unsigned int)num; | ||
1082 | return ((n & 0x1f) << 18); | ||
1083 | } | ||
1084 | |||
1085 | static __inline tile_bundle_bits | ||
1086 | create_MMEnd_X1(int num) | ||
1087 | { | ||
1088 | const unsigned int n = (unsigned int)num; | ||
1089 | return (((tile_bundle_bits)(n & 0x1f)) << 49); | ||
1090 | } | ||
1091 | |||
1092 | static __inline tile_bundle_bits | ||
1093 | create_MMStart_X0(int num) | ||
1094 | { | ||
1095 | const unsigned int n = (unsigned int)num; | ||
1096 | return ((n & 0x1f) << 23); | ||
1097 | } | ||
1098 | |||
1099 | static __inline tile_bundle_bits | ||
1100 | create_MMStart_X1(int num) | ||
1101 | { | ||
1102 | const unsigned int n = (unsigned int)num; | ||
1103 | return (((tile_bundle_bits)(n & 0x1f)) << 54); | ||
1104 | } | ||
1105 | |||
1106 | static __inline tile_bundle_bits | ||
1107 | create_MT_Imm15_X1(int num) | ||
1108 | { | ||
1109 | const unsigned int n = (unsigned int)num; | ||
1110 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
1111 | (((tile_bundle_bits)(n & 0x00003fc0)) << 37) | | ||
1112 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1113 | } | ||
1114 | |||
1115 | static __inline tile_bundle_bits | ||
1116 | create_Mode(int num) | ||
1117 | { | ||
1118 | const unsigned int n = (unsigned int)num; | ||
1119 | return (((tile_bundle_bits)(n & 0x1)) << 63); | ||
1120 | } | ||
1121 | |||
1122 | static __inline tile_bundle_bits | ||
1123 | create_NoRegOpcodeExtension_SN(int num) | ||
1124 | { | ||
1125 | const unsigned int n = (unsigned int)num; | ||
1126 | return ((n & 0xf) << 0); | ||
1127 | } | ||
1128 | |||
1129 | static __inline tile_bundle_bits | ||
1130 | create_Opcode_SN(int num) | ||
1131 | { | ||
1132 | const unsigned int n = (unsigned int)num; | ||
1133 | return ((n & 0x3f) << 10); | ||
1134 | } | ||
1135 | |||
1136 | static __inline tile_bundle_bits | ||
1137 | create_Opcode_X0(int num) | ||
1138 | { | ||
1139 | const unsigned int n = (unsigned int)num; | ||
1140 | return ((n & 0x7) << 28); | ||
1141 | } | ||
1142 | |||
1143 | static __inline tile_bundle_bits | ||
1144 | create_Opcode_X1(int num) | ||
1145 | { | ||
1146 | const unsigned int n = (unsigned int)num; | ||
1147 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1148 | } | ||
1149 | |||
1150 | static __inline tile_bundle_bits | ||
1151 | create_Opcode_Y0(int num) | ||
1152 | { | ||
1153 | const unsigned int n = (unsigned int)num; | ||
1154 | return ((n & 0xf) << 27); | ||
1155 | } | ||
1156 | |||
1157 | static __inline tile_bundle_bits | ||
1158 | create_Opcode_Y1(int num) | ||
1159 | { | ||
1160 | const unsigned int n = (unsigned int)num; | ||
1161 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1162 | } | ||
1163 | |||
1164 | static __inline tile_bundle_bits | ||
1165 | create_Opcode_Y2(int num) | ||
1166 | { | ||
1167 | const unsigned int n = (unsigned int)num; | ||
1168 | return (((tile_bundle_bits)(n & 0x7)) << 56); | ||
1169 | } | ||
1170 | |||
1171 | static __inline tile_bundle_bits | ||
1172 | create_RROpcodeExtension_SN(int num) | ||
1173 | { | ||
1174 | const unsigned int n = (unsigned int)num; | ||
1175 | return ((n & 0xf) << 4); | ||
1176 | } | ||
1177 | |||
1178 | static __inline tile_bundle_bits | ||
1179 | create_RRROpcodeExtension_X0(int num) | ||
1180 | { | ||
1181 | const unsigned int n = (unsigned int)num; | ||
1182 | return ((n & 0x1ff) << 18); | ||
1183 | } | ||
1184 | |||
1185 | static __inline tile_bundle_bits | ||
1186 | create_RRROpcodeExtension_X1(int num) | ||
1187 | { | ||
1188 | const unsigned int n = (unsigned int)num; | ||
1189 | return (((tile_bundle_bits)(n & 0x1ff)) << 49); | ||
1190 | } | ||
1191 | |||
1192 | static __inline tile_bundle_bits | ||
1193 | create_RRROpcodeExtension_Y0(int num) | ||
1194 | { | ||
1195 | const unsigned int n = (unsigned int)num; | ||
1196 | return ((n & 0x3) << 18); | ||
1197 | } | ||
1198 | |||
1199 | static __inline tile_bundle_bits | ||
1200 | create_RRROpcodeExtension_Y1(int num) | ||
1201 | { | ||
1202 | const unsigned int n = (unsigned int)num; | ||
1203 | return (((tile_bundle_bits)(n & 0x3)) << 49); | ||
1204 | } | ||
1205 | |||
1206 | static __inline tile_bundle_bits | ||
1207 | create_RouteOpcodeExtension_SN(int num) | ||
1208 | { | ||
1209 | const unsigned int n = (unsigned int)num; | ||
1210 | return ((n & 0x3ff) << 0); | ||
1211 | } | ||
1212 | |||
1213 | static __inline tile_bundle_bits | ||
1214 | create_S_X0(int num) | ||
1215 | { | ||
1216 | const unsigned int n = (unsigned int)num; | ||
1217 | return ((n & 0x1) << 27); | ||
1218 | } | ||
1219 | |||
1220 | static __inline tile_bundle_bits | ||
1221 | create_S_X1(int num) | ||
1222 | { | ||
1223 | const unsigned int n = (unsigned int)num; | ||
1224 | return (((tile_bundle_bits)(n & 0x1)) << 58); | ||
1225 | } | ||
1226 | |||
1227 | static __inline tile_bundle_bits | ||
1228 | create_ShAmt_X0(int num) | ||
1229 | { | ||
1230 | const unsigned int n = (unsigned int)num; | ||
1231 | return ((n & 0x1f) << 12); | ||
1232 | } | ||
1233 | |||
1234 | static __inline tile_bundle_bits | ||
1235 | create_ShAmt_X1(int num) | ||
1236 | { | ||
1237 | const unsigned int n = (unsigned int)num; | ||
1238 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1239 | } | ||
1240 | |||
1241 | static __inline tile_bundle_bits | ||
1242 | create_ShAmt_Y0(int num) | ||
1243 | { | ||
1244 | const unsigned int n = (unsigned int)num; | ||
1245 | return ((n & 0x1f) << 12); | ||
1246 | } | ||
1247 | |||
1248 | static __inline tile_bundle_bits | ||
1249 | create_ShAmt_Y1(int num) | ||
1250 | { | ||
1251 | const unsigned int n = (unsigned int)num; | ||
1252 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1253 | } | ||
1254 | |||
1255 | static __inline tile_bundle_bits | ||
1256 | create_SrcA_X0(int num) | ||
1257 | { | ||
1258 | const unsigned int n = (unsigned int)num; | ||
1259 | return ((n & 0x3f) << 6); | ||
1260 | } | ||
1261 | |||
1262 | static __inline tile_bundle_bits | ||
1263 | create_SrcA_X1(int num) | ||
1264 | { | ||
1265 | const unsigned int n = (unsigned int)num; | ||
1266 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1267 | } | ||
1268 | |||
1269 | static __inline tile_bundle_bits | ||
1270 | create_SrcA_Y0(int num) | ||
1271 | { | ||
1272 | const unsigned int n = (unsigned int)num; | ||
1273 | return ((n & 0x3f) << 6); | ||
1274 | } | ||
1275 | |||
1276 | static __inline tile_bundle_bits | ||
1277 | create_SrcA_Y1(int num) | ||
1278 | { | ||
1279 | const unsigned int n = (unsigned int)num; | ||
1280 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1281 | } | ||
1282 | |||
1283 | static __inline tile_bundle_bits | ||
1284 | create_SrcA_Y2(int num) | ||
1285 | { | ||
1286 | const unsigned int n = (unsigned int)num; | ||
1287 | return ((n & 0x00000001) << 26) | | ||
1288 | (((tile_bundle_bits)(n & 0x0000003e)) << 50); | ||
1289 | } | ||
1290 | |||
1291 | static __inline tile_bundle_bits | ||
1292 | create_SrcBDest_Y2(int num) | ||
1293 | { | ||
1294 | const unsigned int n = (unsigned int)num; | ||
1295 | return ((n & 0x3f) << 20); | ||
1296 | } | ||
1297 | |||
1298 | static __inline tile_bundle_bits | ||
1299 | create_SrcB_X0(int num) | ||
1300 | { | ||
1301 | const unsigned int n = (unsigned int)num; | ||
1302 | return ((n & 0x3f) << 12); | ||
1303 | } | ||
1304 | |||
1305 | static __inline tile_bundle_bits | ||
1306 | create_SrcB_X1(int num) | ||
1307 | { | ||
1308 | const unsigned int n = (unsigned int)num; | ||
1309 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1310 | } | ||
1311 | |||
1312 | static __inline tile_bundle_bits | ||
1313 | create_SrcB_Y0(int num) | ||
1314 | { | ||
1315 | const unsigned int n = (unsigned int)num; | ||
1316 | return ((n & 0x3f) << 12); | ||
1317 | } | ||
1318 | |||
1319 | static __inline tile_bundle_bits | ||
1320 | create_SrcB_Y1(int num) | ||
1321 | { | ||
1322 | const unsigned int n = (unsigned int)num; | ||
1323 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1324 | } | ||
1325 | |||
1326 | static __inline tile_bundle_bits | ||
1327 | create_Src_SN(int num) | ||
1328 | { | ||
1329 | const unsigned int n = (unsigned int)num; | ||
1330 | return ((n & 0x3) << 0); | ||
1331 | } | ||
1332 | |||
1333 | static __inline tile_bundle_bits | ||
1334 | create_UnOpcodeExtension_X0(int num) | ||
1335 | { | ||
1336 | const unsigned int n = (unsigned int)num; | ||
1337 | return ((n & 0x1f) << 12); | ||
1338 | } | ||
1339 | |||
1340 | static __inline tile_bundle_bits | ||
1341 | create_UnOpcodeExtension_X1(int num) | ||
1342 | { | ||
1343 | const unsigned int n = (unsigned int)num; | ||
1344 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1345 | } | ||
1346 | |||
1347 | static __inline tile_bundle_bits | ||
1348 | create_UnOpcodeExtension_Y0(int num) | ||
1349 | { | ||
1350 | const unsigned int n = (unsigned int)num; | ||
1351 | return ((n & 0x1f) << 12); | ||
1352 | } | ||
1353 | |||
1354 | static __inline tile_bundle_bits | ||
1355 | create_UnOpcodeExtension_Y1(int num) | ||
1356 | { | ||
1357 | const unsigned int n = (unsigned int)num; | ||
1358 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1359 | } | ||
1360 | |||
1361 | static __inline tile_bundle_bits | ||
1362 | create_UnShOpcodeExtension_X0(int num) | ||
1363 | { | ||
1364 | const unsigned int n = (unsigned int)num; | ||
1365 | return ((n & 0x3ff) << 17); | ||
1366 | } | ||
1367 | |||
1368 | static __inline tile_bundle_bits | ||
1369 | create_UnShOpcodeExtension_X1(int num) | ||
1370 | { | ||
1371 | const unsigned int n = (unsigned int)num; | ||
1372 | return (((tile_bundle_bits)(n & 0x3ff)) << 48); | ||
1373 | } | ||
1374 | |||
1375 | static __inline tile_bundle_bits | ||
1376 | create_UnShOpcodeExtension_Y0(int num) | ||
1377 | { | ||
1378 | const unsigned int n = (unsigned int)num; | ||
1379 | return ((n & 0x7) << 17); | ||
1380 | } | ||
1381 | |||
1382 | static __inline tile_bundle_bits | ||
1383 | create_UnShOpcodeExtension_Y1(int num) | ||
1384 | { | ||
1385 | const unsigned int n = (unsigned int)num; | ||
1386 | return (((tile_bundle_bits)(n & 0x7)) << 48); | ||
1387 | } | ||
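A hedged sketch of how the create_* helpers compose: each one places a single field, so a full encoding is the OR of the relevant fields. Only the X1 register fields are shown here (the helper name is hypothetical); the opcode and opcode-extension fields from the generated constants headers still have to be ORed in.

static __inline tile_bundle_bits
pack_X1_reg_fields(int dest, int srca, int srcb)
{
	/* Each create_* call returns bits already shifted into place. */
	return create_Dest_X1(dest) |
	       create_SrcA_X1(srca) |
	       create_SrcB_X1(srcb);
}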
1388 | |||
1389 | |||
1390 | typedef unsigned short tile_sn_instruction_bits; | ||
1391 | |||
1392 | |||
1393 | typedef enum | ||
1394 | { | ||
1395 | TILE_PIPELINE_X0, | ||
1396 | TILE_PIPELINE_X1, | ||
1397 | TILE_PIPELINE_Y0, | ||
1398 | TILE_PIPELINE_Y1, | ||
1399 | TILE_PIPELINE_Y2, | ||
1400 | } tile_pipeline; | ||
1401 | |||
1402 | #define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1) | ||
1403 | |||
1404 | typedef enum | ||
1405 | { | ||
1406 | TILE_OP_TYPE_REGISTER, | ||
1407 | TILE_OP_TYPE_IMMEDIATE, | ||
1408 | TILE_OP_TYPE_ADDRESS, | ||
1409 | TILE_OP_TYPE_SPR | ||
1410 | } tile_operand_type; | ||
1411 | |||
1412 | /* This is the bit that determines if a bundle is in the Y encoding. */ | ||
1413 | #define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63) | ||
1414 | |||
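For illustration, the mask above (or equivalently get_Mode() earlier in this file) distinguishes the two encodings; the helper name is hypothetical.

static __inline int
bundle_uses_y_encoding(tile_bundle_bits bundle)
{
	/* Non-zero when the mode bit selects the three-slot Y form. */
	return (bundle & TILE_BUNDLE_Y_ENCODING_MASK) != 0;
}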
1415 | enum | ||
1416 | { | ||
1417 | /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */ | ||
1418 | TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3, | ||
1419 | |||
1420 | /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */ | ||
1421 | TILE_NUM_PIPELINE_ENCODINGS = 5, | ||
1422 | |||
1423 | /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */ | ||
1424 | TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3, | ||
1425 | |||
1426 | /* Bundles take this many bytes. */ | ||
1427 | TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES, | ||
1428 | |||
1429 | /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */ | ||
1430 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, | ||
1431 | |||
1432 | /* Bundles should be aligned modulo this number of bytes. */ | ||
1433 | TILE_BUNDLE_ALIGNMENT_IN_BYTES = | ||
1434 | (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), | ||
1435 | |||
1436 | /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */ | ||
1437 | TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1, | ||
1438 | |||
1439 | /* Static network instructions take this many bytes. */ | ||
1440 | TILE_SN_INSTRUCTION_SIZE_IN_BYTES = | ||
1441 | (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES), | ||
1442 | |||
1443 | /* Number of registers (some are magic, such as network I/O). */ | ||
1444 | TILE_NUM_REGISTERS = 64, | ||
1445 | |||
1446 | /* Number of static network registers. */ | ||
1447 | TILE_NUM_SN_REGISTERS = 4 | ||
1448 | }; | ||
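A small sketch using the constants above; the helper name is hypothetical.

static __inline int
bundle_address_is_aligned(unsigned long addr)
{
	/* Bundles must start on a TILE_BUNDLE_ALIGNMENT_IN_BYTES boundary. */
	return (addr & (TILE_BUNDLE_ALIGNMENT_IN_BYTES - 1)) == 0;
}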
1449 | |||
1450 | |||
1451 | struct tile_operand | ||
1452 | { | ||
1453 | /* Is this operand a register, immediate or address? */ | ||
1454 | tile_operand_type type; | ||
1455 | |||
1456 | /* The default relocation type for this operand. */ | ||
1457 | signed int default_reloc : 16; | ||
1458 | |||
1459 | /* How many bits is this value? (used for range checking) */ | ||
1460 | unsigned int num_bits : 5; | ||
1461 | |||
1462 | /* Is the value signed? (used for range checking) */ | ||
1463 | unsigned int is_signed : 1; | ||
1464 | |||
1465 | /* Is this operand a source register? */ | ||
1466 | unsigned int is_src_reg : 1; | ||
1467 | |||
1468 | /* Is this operand written? (i.e. is it a destination register) */ | ||
1469 | unsigned int is_dest_reg : 1; | ||
1470 | |||
1471 | /* Is this operand PC-relative? */ | ||
1472 | unsigned int is_pc_relative : 1; | ||
1473 | |||
1474 | /* By how many bits do we right shift the value before inserting? */ | ||
1475 | unsigned int rightshift : 2; | ||
1476 | |||
1477 | /* Return the bits for this operand to be ORed into an existing bundle. */ | ||
1478 | tile_bundle_bits (*insert) (int op); | ||
1479 | |||
1480 | /* Extract this operand and return it. */ | ||
1481 | unsigned int (*extract) (tile_bundle_bits bundle); | ||
1482 | }; | ||
1483 | |||
1484 | |||
1485 | extern const struct tile_operand tile_operands[]; | ||
1486 | |||
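A hedged sketch of the insert()/extract() pair described in the struct above: for a non-negative value that fits in num_bits, inserting into an otherwise-empty bundle and extracting again should give the value back. The helper name is hypothetical, and this ignores any rightshift handling.

static __inline int
operand_field_roundtrips(const struct tile_operand *op, int value)
{
	/* insert() returns the field bits already in place; extract()
	 * pulls the raw (unsigned, unshifted) field back out. */
	return (int)op->extract(op->insert(value)) == value;
}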
1487 | /* One finite-state machine per pipe for rapid instruction decoding. */ | ||
1488 | extern const unsigned short * const | ||
1489 | tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1490 | |||
1491 | |||
1492 | struct tile_opcode | ||
1493 | { | ||
1494 | /* The opcode mnemonic, e.g. "add" */ | ||
1495 | const char *name; | ||
1496 | |||
1497 | /* The enum value for this mnemonic. */ | ||
1498 | tile_mnemonic mnemonic; | ||
1499 | |||
1500 | /* A bit mask of which of the five pipes this instruction | ||
1501 | is compatible with: | ||
1502 | X0 0x01 | ||
1503 | X1 0x02 | ||
1504 | Y0 0x04 | ||
1505 | Y1 0x08 | ||
1506 | Y2 0x10 */ | ||
1507 | unsigned char pipes; | ||
1508 | |||
1509 | /* How many operands are there? */ | ||
1510 | unsigned char num_operands; | ||
1511 | |||
1512 | /* Which register does this write implicitly, or TREG_ZERO if none? */ | ||
1513 | unsigned char implicitly_written_register; | ||
1514 | |||
1515 | /* Can this be bundled with other instructions? (almost always true) */ | ||
1516 | unsigned char can_bundle; | ||
1517 | |||
1518 | /* The description of the operands. Each of these is an | ||
1519 | * index into the tile_operands[] table. */ | ||
1520 | unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS]; | ||
1521 | |||
1522 | /* A mask of which bits have predefined values for each pipeline. | ||
1523 | * This is useful for disassembly. */ | ||
1524 | tile_bundle_bits fixed_bit_masks[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1525 | |||
1526 | /* For each bit set in fixed_bit_masks, what the value is for this | ||
1527 | * instruction. */ | ||
1528 | tile_bundle_bits fixed_bit_values[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1529 | }; | ||
1530 | |||
1531 | extern const struct tile_opcode tile_opcodes[]; | ||
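A lookup sketch over tile_opcodes[]; it assumes the table holds one entry per tile_mnemonic value in enum order (so TILE_OPC_NONE bounds the scan), which this header does not guarantee. The helper name and the strcmp() dependency (from <linux/string.h> in kernel code) are illustrative.

static __inline const struct tile_opcode *
find_tile_opcode_by_name(const char *name)
{
	int i;

	/* Assumption: tile_opcodes[] is indexed by tile_mnemonic. */
	for (i = 0; i < (int)TILE_OPC_NONE; i++)
		if (strcmp(tile_opcodes[i].name, name) == 0)
			return &tile_opcodes[i];
	return 0;
}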
1532 | |||
1533 | struct tile_sn_opcode | ||
1534 | { | ||
1535 | /* The opcode mnemonic, e.g. "add" */ | ||
1536 | const char *name; | ||
1537 | |||
1538 | /* The enum value for this mnemonic. */ | ||
1539 | tile_sn_mnemonic mnemonic; | ||
1540 | |||
1541 | /* How many operands are there? */ | ||
1542 | unsigned char num_operands; | ||
1543 | |||
1544 | /* The description of the operands. Each of these is an | ||
1545 | * index into the tile_operands[] table. */ | ||
1546 | unsigned char operands[TILE_SN_MAX_OPERANDS]; | ||
1547 | |||
1548 | /* A mask of which bits have predefined values. | ||
1549 | * This is useful for disassembly. */ | ||
1550 | tile_sn_instruction_bits fixed_bit_mask; | ||
1551 | |||
1552 | /* For each bit set in fixed_bit_mask, what its value is. */ | ||
1553 | tile_sn_instruction_bits fixed_bit_values; | ||
1554 | }; | ||
1555 | |||
1556 | extern const struct tile_sn_opcode tile_sn_opcodes[]; | ||
1557 | |||
1558 | /* Used for non-textual disassembly into structs. */ | ||
1559 | struct tile_decoded_instruction | ||
1560 | { | ||
1561 | const struct tile_opcode *opcode; | ||
1562 | const struct tile_operand *operands[TILE_MAX_OPERANDS]; | ||
1563 | int operand_values[TILE_MAX_OPERANDS]; | ||
1564 | }; | ||
1565 | |||
1566 | |||
1567 | /* Disassemble a bundle into a struct for machine processing. */ | ||
1568 | extern int parse_insn_tile(tile_bundle_bits bits, | ||
1569 | unsigned int pc, | ||
1570 | struct tile_decoded_instruction | ||
1571 | decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); | ||
1572 | |||
1573 | |||
1574 | /* Canonical names of all the registers. */ | ||
1575 | /* ISSUE: This table lives in "tile-dis.c" */ | ||
1576 | extern const char * const tile_register_names[]; | ||
1577 | |||
1578 | /* Descriptor for a special-purpose register. */ | ||
1579 | struct tile_spr | ||
1580 | { | ||
1581 | /* The number */ | ||
1582 | int number; | ||
1583 | |||
1584 | /* The name */ | ||
1585 | const char *name; | ||
1586 | }; | ||
1587 | |||
1588 | /* List of all the SPRs; ordered by increasing number. */ | ||
1589 | extern const struct tile_spr tile_sprs[]; | ||
1590 | |||
1591 | /* Number of special-purpose registers. */ | ||
1592 | extern const int tile_num_sprs; | ||
1593 | |||
1594 | extern const char * | ||
1595 | get_tile_spr_name (int num); | ||
1596 | |||
1597 | #endif /* opcode_tile_h */ | ||
diff --git a/arch/tile/include/asm/opcode_constants.h b/arch/tile/include/asm/opcode_constants.h new file mode 100644 index 000000000000..37a9f2958cb1 --- /dev/null +++ b/arch/tile/include/asm/opcode_constants.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_OPCODE_CONSTANTS_H | ||
16 | #define _ASM_TILE_OPCODE_CONSTANTS_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | #if CHIP_WORD_SIZE() == 64 | ||
21 | #include <asm/opcode_constants_64.h> | ||
22 | #else | ||
23 | #include <asm/opcode_constants_32.h> | ||
24 | #endif | ||
25 | |||
26 | #endif /* _ASM_TILE_OPCODE_CONSTANTS_H */ | ||
diff --git a/arch/tile/include/asm/opcode_constants_32.h b/arch/tile/include/asm/opcode_constants_32.h new file mode 100644 index 000000000000..227d033b180c --- /dev/null +++ b/arch/tile/include/asm/opcode_constants_32.h | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | |||
17 | |||
18 | #ifndef _TILE_OPCODE_CONSTANTS_H | ||
19 | #define _TILE_OPCODE_CONSTANTS_H | ||
20 | enum | ||
21 | { | ||
22 | ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, | ||
23 | ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, | ||
24 | ADDB_SPECIAL_0_OPCODE_X0 = 1, | ||
25 | ADDB_SPECIAL_0_OPCODE_X1 = 1, | ||
26 | ADDHS_SPECIAL_0_OPCODE_X0 = 99, | ||
27 | ADDHS_SPECIAL_0_OPCODE_X1 = 69, | ||
28 | ADDH_SPECIAL_0_OPCODE_X0 = 2, | ||
29 | ADDH_SPECIAL_0_OPCODE_X1 = 2, | ||
30 | ADDIB_IMM_0_OPCODE_X0 = 1, | ||
31 | ADDIB_IMM_0_OPCODE_X1 = 1, | ||
32 | ADDIH_IMM_0_OPCODE_X0 = 2, | ||
33 | ADDIH_IMM_0_OPCODE_X1 = 2, | ||
34 | ADDI_IMM_0_OPCODE_X0 = 3, | ||
35 | ADDI_IMM_0_OPCODE_X1 = 3, | ||
36 | ADDI_IMM_1_OPCODE_SN = 1, | ||
37 | ADDI_OPCODE_Y0 = 9, | ||
38 | ADDI_OPCODE_Y1 = 7, | ||
39 | ADDLIS_OPCODE_X0 = 1, | ||
40 | ADDLIS_OPCODE_X1 = 2, | ||
41 | ADDLI_OPCODE_X0 = 2, | ||
42 | ADDLI_OPCODE_X1 = 3, | ||
43 | ADDS_SPECIAL_0_OPCODE_X0 = 96, | ||
44 | ADDS_SPECIAL_0_OPCODE_X1 = 66, | ||
45 | ADD_SPECIAL_0_OPCODE_X0 = 3, | ||
46 | ADD_SPECIAL_0_OPCODE_X1 = 3, | ||
47 | ADD_SPECIAL_0_OPCODE_Y0 = 0, | ||
48 | ADD_SPECIAL_0_OPCODE_Y1 = 0, | ||
49 | ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, | ||
50 | ADIFFH_SPECIAL_0_OPCODE_X0 = 5, | ||
51 | ANDI_IMM_0_OPCODE_X0 = 1, | ||
52 | ANDI_IMM_0_OPCODE_X1 = 4, | ||
53 | ANDI_OPCODE_Y0 = 10, | ||
54 | ANDI_OPCODE_Y1 = 8, | ||
55 | AND_SPECIAL_0_OPCODE_X0 = 6, | ||
56 | AND_SPECIAL_0_OPCODE_X1 = 4, | ||
57 | AND_SPECIAL_2_OPCODE_Y0 = 0, | ||
58 | AND_SPECIAL_2_OPCODE_Y1 = 0, | ||
59 | AULI_OPCODE_X0 = 3, | ||
60 | AULI_OPCODE_X1 = 4, | ||
61 | AVGB_U_SPECIAL_0_OPCODE_X0 = 7, | ||
62 | AVGH_SPECIAL_0_OPCODE_X0 = 8, | ||
63 | BBNST_BRANCH_OPCODE_X1 = 15, | ||
64 | BBNS_BRANCH_OPCODE_X1 = 14, | ||
65 | BBNS_OPCODE_SN = 63, | ||
66 | BBST_BRANCH_OPCODE_X1 = 13, | ||
67 | BBS_BRANCH_OPCODE_X1 = 12, | ||
68 | BBS_OPCODE_SN = 62, | ||
69 | BGEZT_BRANCH_OPCODE_X1 = 7, | ||
70 | BGEZ_BRANCH_OPCODE_X1 = 6, | ||
71 | BGEZ_OPCODE_SN = 61, | ||
72 | BGZT_BRANCH_OPCODE_X1 = 5, | ||
73 | BGZ_BRANCH_OPCODE_X1 = 4, | ||
74 | BGZ_OPCODE_SN = 58, | ||
75 | BITX_UN_0_SHUN_0_OPCODE_X0 = 1, | ||
76 | BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, | ||
77 | BLEZT_BRANCH_OPCODE_X1 = 11, | ||
78 | BLEZ_BRANCH_OPCODE_X1 = 10, | ||
79 | BLEZ_OPCODE_SN = 59, | ||
80 | BLZT_BRANCH_OPCODE_X1 = 9, | ||
81 | BLZ_BRANCH_OPCODE_X1 = 8, | ||
82 | BLZ_OPCODE_SN = 60, | ||
83 | BNZT_BRANCH_OPCODE_X1 = 3, | ||
84 | BNZ_BRANCH_OPCODE_X1 = 2, | ||
85 | BNZ_OPCODE_SN = 57, | ||
86 | BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, | ||
87 | BRANCH_OPCODE_X1 = 5, | ||
88 | BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, | ||
89 | BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, | ||
90 | BZT_BRANCH_OPCODE_X1 = 1, | ||
91 | BZ_BRANCH_OPCODE_X1 = 0, | ||
92 | BZ_OPCODE_SN = 56, | ||
93 | CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, | ||
94 | CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, | ||
95 | CRC32_32_SPECIAL_0_OPCODE_X0 = 9, | ||
96 | CRC32_8_SPECIAL_0_OPCODE_X0 = 10, | ||
97 | CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, | ||
98 | CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, | ||
99 | DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, | ||
100 | DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, | ||
101 | DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, | ||
102 | FINV_UN_0_SHUN_0_OPCODE_X1 = 3, | ||
103 | FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, | ||
104 | FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, | ||
105 | FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, | ||
106 | FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, | ||
107 | FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
108 | FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, | ||
109 | HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
110 | ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, | ||
111 | ILL_UN_0_SHUN_0_OPCODE_X1 = 7, | ||
112 | ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, | ||
113 | IMM_0_OPCODE_SN = 0, | ||
114 | IMM_0_OPCODE_X0 = 4, | ||
115 | IMM_0_OPCODE_X1 = 6, | ||
116 | IMM_1_OPCODE_SN = 1, | ||
117 | IMM_OPCODE_0_X0 = 5, | ||
118 | INTHB_SPECIAL_0_OPCODE_X0 = 11, | ||
119 | INTHB_SPECIAL_0_OPCODE_X1 = 5, | ||
120 | INTHH_SPECIAL_0_OPCODE_X0 = 12, | ||
121 | INTHH_SPECIAL_0_OPCODE_X1 = 6, | ||
122 | INTLB_SPECIAL_0_OPCODE_X0 = 13, | ||
123 | INTLB_SPECIAL_0_OPCODE_X1 = 7, | ||
124 | INTLH_SPECIAL_0_OPCODE_X0 = 14, | ||
125 | INTLH_SPECIAL_0_OPCODE_X1 = 8, | ||
126 | INV_UN_0_SHUN_0_OPCODE_X1 = 8, | ||
127 | IRET_UN_0_SHUN_0_OPCODE_X1 = 9, | ||
128 | JALB_OPCODE_X1 = 13, | ||
129 | JALF_OPCODE_X1 = 12, | ||
130 | JALRP_SPECIAL_0_OPCODE_X1 = 9, | ||
131 | JALRR_IMM_1_OPCODE_SN = 3, | ||
132 | JALR_RR_IMM_0_OPCODE_SN = 5, | ||
133 | JALR_SPECIAL_0_OPCODE_X1 = 10, | ||
134 | JB_OPCODE_X1 = 11, | ||
135 | JF_OPCODE_X1 = 10, | ||
136 | JRP_SPECIAL_0_OPCODE_X1 = 11, | ||
137 | JRR_IMM_1_OPCODE_SN = 2, | ||
138 | JR_RR_IMM_0_OPCODE_SN = 4, | ||
139 | JR_SPECIAL_0_OPCODE_X1 = 12, | ||
140 | LBADD_IMM_0_OPCODE_X1 = 22, | ||
141 | LBADD_U_IMM_0_OPCODE_X1 = 23, | ||
142 | LB_OPCODE_Y2 = 0, | ||
143 | LB_UN_0_SHUN_0_OPCODE_X1 = 10, | ||
144 | LB_U_OPCODE_Y2 = 1, | ||
145 | LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, | ||
146 | LHADD_IMM_0_OPCODE_X1 = 24, | ||
147 | LHADD_U_IMM_0_OPCODE_X1 = 25, | ||
148 | LH_OPCODE_Y2 = 2, | ||
149 | LH_UN_0_SHUN_0_OPCODE_X1 = 12, | ||
150 | LH_U_OPCODE_Y2 = 3, | ||
151 | LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, | ||
152 | LNK_SPECIAL_0_OPCODE_X1 = 13, | ||
153 | LWADD_IMM_0_OPCODE_X1 = 26, | ||
154 | LWADD_NA_IMM_0_OPCODE_X1 = 27, | ||
155 | LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, | ||
156 | LW_OPCODE_Y2 = 4, | ||
157 | LW_UN_0_SHUN_0_OPCODE_X1 = 14, | ||
158 | MAXB_U_SPECIAL_0_OPCODE_X0 = 15, | ||
159 | MAXB_U_SPECIAL_0_OPCODE_X1 = 14, | ||
160 | MAXH_SPECIAL_0_OPCODE_X0 = 16, | ||
161 | MAXH_SPECIAL_0_OPCODE_X1 = 15, | ||
162 | MAXIB_U_IMM_0_OPCODE_X0 = 4, | ||
163 | MAXIB_U_IMM_0_OPCODE_X1 = 5, | ||
164 | MAXIH_IMM_0_OPCODE_X0 = 5, | ||
165 | MAXIH_IMM_0_OPCODE_X1 = 6, | ||
166 | MFSPR_IMM_0_OPCODE_X1 = 7, | ||
167 | MF_UN_0_SHUN_0_OPCODE_X1 = 15, | ||
168 | MINB_U_SPECIAL_0_OPCODE_X0 = 17, | ||
169 | MINB_U_SPECIAL_0_OPCODE_X1 = 16, | ||
170 | MINH_SPECIAL_0_OPCODE_X0 = 18, | ||
171 | MINH_SPECIAL_0_OPCODE_X1 = 17, | ||
172 | MINIB_U_IMM_0_OPCODE_X0 = 6, | ||
173 | MINIB_U_IMM_0_OPCODE_X1 = 8, | ||
174 | MINIH_IMM_0_OPCODE_X0 = 7, | ||
175 | MINIH_IMM_0_OPCODE_X1 = 9, | ||
176 | MM_OPCODE_X0 = 6, | ||
177 | MM_OPCODE_X1 = 7, | ||
178 | MNZB_SPECIAL_0_OPCODE_X0 = 19, | ||
179 | MNZB_SPECIAL_0_OPCODE_X1 = 18, | ||
180 | MNZH_SPECIAL_0_OPCODE_X0 = 20, | ||
181 | MNZH_SPECIAL_0_OPCODE_X1 = 19, | ||
182 | MNZ_SPECIAL_0_OPCODE_X0 = 21, | ||
183 | MNZ_SPECIAL_0_OPCODE_X1 = 20, | ||
184 | MNZ_SPECIAL_1_OPCODE_Y0 = 0, | ||
185 | MNZ_SPECIAL_1_OPCODE_Y1 = 1, | ||
186 | MOVEI_IMM_1_OPCODE_SN = 0, | ||
187 | MOVE_RR_IMM_0_OPCODE_SN = 8, | ||
188 | MTSPR_IMM_0_OPCODE_X1 = 10, | ||
189 | MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, | ||
190 | MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, | ||
191 | MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, | ||
192 | MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, | ||
193 | MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, | ||
194 | MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, | ||
195 | MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, | ||
196 | MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, | ||
197 | MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, | ||
198 | MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, | ||
199 | MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, | ||
200 | MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, | ||
201 | MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, | ||
202 | MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, | ||
203 | MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, | ||
204 | MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, | ||
205 | MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, | ||
206 | MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, | ||
207 | MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, | ||
208 | MULHL_US_SPECIAL_0_OPCODE_X0 = 36, | ||
209 | MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, | ||
210 | MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, | ||
211 | MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, | ||
212 | MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, | ||
213 | MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, | ||
214 | MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, | ||
215 | MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, | ||
216 | MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, | ||
217 | MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, | ||
218 | MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, | ||
219 | MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, | ||
220 | MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, | ||
221 | MVNZ_SPECIAL_0_OPCODE_X0 = 45, | ||
222 | MVNZ_SPECIAL_1_OPCODE_Y0 = 1, | ||
223 | MVZ_SPECIAL_0_OPCODE_X0 = 46, | ||
224 | MVZ_SPECIAL_1_OPCODE_Y0 = 2, | ||
225 | MZB_SPECIAL_0_OPCODE_X0 = 47, | ||
226 | MZB_SPECIAL_0_OPCODE_X1 = 21, | ||
227 | MZH_SPECIAL_0_OPCODE_X0 = 48, | ||
228 | MZH_SPECIAL_0_OPCODE_X1 = 22, | ||
229 | MZ_SPECIAL_0_OPCODE_X0 = 49, | ||
230 | MZ_SPECIAL_0_OPCODE_X1 = 23, | ||
231 | MZ_SPECIAL_1_OPCODE_Y0 = 3, | ||
232 | MZ_SPECIAL_1_OPCODE_Y1 = 2, | ||
233 | NAP_UN_0_SHUN_0_OPCODE_X1 = 16, | ||
234 | NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, | ||
235 | NOP_UN_0_SHUN_0_OPCODE_X0 = 6, | ||
236 | NOP_UN_0_SHUN_0_OPCODE_X1 = 17, | ||
237 | NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, | ||
238 | NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, | ||
239 | NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
240 | NOR_SPECIAL_0_OPCODE_X0 = 50, | ||
241 | NOR_SPECIAL_0_OPCODE_X1 = 24, | ||
242 | NOR_SPECIAL_2_OPCODE_Y0 = 1, | ||
243 | NOR_SPECIAL_2_OPCODE_Y1 = 1, | ||
244 | ORI_IMM_0_OPCODE_X0 = 8, | ||
245 | ORI_IMM_0_OPCODE_X1 = 11, | ||
246 | ORI_OPCODE_Y0 = 11, | ||
247 | ORI_OPCODE_Y1 = 9, | ||
248 | OR_SPECIAL_0_OPCODE_X0 = 51, | ||
249 | OR_SPECIAL_0_OPCODE_X1 = 25, | ||
250 | OR_SPECIAL_2_OPCODE_Y0 = 2, | ||
251 | OR_SPECIAL_2_OPCODE_Y1 = 2, | ||
252 | PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, | ||
253 | PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, | ||
254 | PACKHB_SPECIAL_0_OPCODE_X0 = 52, | ||
255 | PACKHB_SPECIAL_0_OPCODE_X1 = 26, | ||
256 | PACKHS_SPECIAL_0_OPCODE_X0 = 102, | ||
257 | PACKHS_SPECIAL_0_OPCODE_X1 = 72, | ||
258 | PACKLB_SPECIAL_0_OPCODE_X0 = 53, | ||
259 | PACKLB_SPECIAL_0_OPCODE_X1 = 27, | ||
260 | PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, | ||
261 | PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, | ||
262 | RLI_SHUN_0_OPCODE_X0 = 1, | ||
263 | RLI_SHUN_0_OPCODE_X1 = 1, | ||
264 | RLI_SHUN_0_OPCODE_Y0 = 1, | ||
265 | RLI_SHUN_0_OPCODE_Y1 = 1, | ||
266 | RL_SPECIAL_0_OPCODE_X0 = 54, | ||
267 | RL_SPECIAL_0_OPCODE_X1 = 28, | ||
268 | RL_SPECIAL_3_OPCODE_Y0 = 0, | ||
269 | RL_SPECIAL_3_OPCODE_Y1 = 0, | ||
270 | RR_IMM_0_OPCODE_SN = 0, | ||
271 | S1A_SPECIAL_0_OPCODE_X0 = 55, | ||
272 | S1A_SPECIAL_0_OPCODE_X1 = 29, | ||
273 | S1A_SPECIAL_0_OPCODE_Y0 = 1, | ||
274 | S1A_SPECIAL_0_OPCODE_Y1 = 1, | ||
275 | S2A_SPECIAL_0_OPCODE_X0 = 56, | ||
276 | S2A_SPECIAL_0_OPCODE_X1 = 30, | ||
277 | S2A_SPECIAL_0_OPCODE_Y0 = 2, | ||
278 | S2A_SPECIAL_0_OPCODE_Y1 = 2, | ||
279 | S3A_SPECIAL_0_OPCODE_X0 = 57, | ||
280 | S3A_SPECIAL_0_OPCODE_X1 = 31, | ||
281 | S3A_SPECIAL_5_OPCODE_Y0 = 1, | ||
282 | S3A_SPECIAL_5_OPCODE_Y1 = 1, | ||
283 | SADAB_U_SPECIAL_0_OPCODE_X0 = 58, | ||
284 | SADAH_SPECIAL_0_OPCODE_X0 = 59, | ||
285 | SADAH_U_SPECIAL_0_OPCODE_X0 = 60, | ||
286 | SADB_U_SPECIAL_0_OPCODE_X0 = 61, | ||
287 | SADH_SPECIAL_0_OPCODE_X0 = 62, | ||
288 | SADH_U_SPECIAL_0_OPCODE_X0 = 63, | ||
289 | SBADD_IMM_0_OPCODE_X1 = 28, | ||
290 | SB_OPCODE_Y2 = 5, | ||
291 | SB_SPECIAL_0_OPCODE_X1 = 32, | ||
292 | SEQB_SPECIAL_0_OPCODE_X0 = 64, | ||
293 | SEQB_SPECIAL_0_OPCODE_X1 = 33, | ||
294 | SEQH_SPECIAL_0_OPCODE_X0 = 65, | ||
295 | SEQH_SPECIAL_0_OPCODE_X1 = 34, | ||
296 | SEQIB_IMM_0_OPCODE_X0 = 9, | ||
297 | SEQIB_IMM_0_OPCODE_X1 = 12, | ||
298 | SEQIH_IMM_0_OPCODE_X0 = 10, | ||
299 | SEQIH_IMM_0_OPCODE_X1 = 13, | ||
300 | SEQI_IMM_0_OPCODE_X0 = 11, | ||
301 | SEQI_IMM_0_OPCODE_X1 = 14, | ||
302 | SEQI_OPCODE_Y0 = 12, | ||
303 | SEQI_OPCODE_Y1 = 10, | ||
304 | SEQ_SPECIAL_0_OPCODE_X0 = 66, | ||
305 | SEQ_SPECIAL_0_OPCODE_X1 = 35, | ||
306 | SEQ_SPECIAL_5_OPCODE_Y0 = 2, | ||
307 | SEQ_SPECIAL_5_OPCODE_Y1 = 2, | ||
308 | SHADD_IMM_0_OPCODE_X1 = 29, | ||
309 | SHL8II_IMM_0_OPCODE_SN = 3, | ||
310 | SHLB_SPECIAL_0_OPCODE_X0 = 67, | ||
311 | SHLB_SPECIAL_0_OPCODE_X1 = 36, | ||
312 | SHLH_SPECIAL_0_OPCODE_X0 = 68, | ||
313 | SHLH_SPECIAL_0_OPCODE_X1 = 37, | ||
314 | SHLIB_SHUN_0_OPCODE_X0 = 2, | ||
315 | SHLIB_SHUN_0_OPCODE_X1 = 2, | ||
316 | SHLIH_SHUN_0_OPCODE_X0 = 3, | ||
317 | SHLIH_SHUN_0_OPCODE_X1 = 3, | ||
318 | SHLI_SHUN_0_OPCODE_X0 = 4, | ||
319 | SHLI_SHUN_0_OPCODE_X1 = 4, | ||
320 | SHLI_SHUN_0_OPCODE_Y0 = 2, | ||
321 | SHLI_SHUN_0_OPCODE_Y1 = 2, | ||
322 | SHL_SPECIAL_0_OPCODE_X0 = 69, | ||
323 | SHL_SPECIAL_0_OPCODE_X1 = 38, | ||
324 | SHL_SPECIAL_3_OPCODE_Y0 = 1, | ||
325 | SHL_SPECIAL_3_OPCODE_Y1 = 1, | ||
326 | SHR1_RR_IMM_0_OPCODE_SN = 9, | ||
327 | SHRB_SPECIAL_0_OPCODE_X0 = 70, | ||
328 | SHRB_SPECIAL_0_OPCODE_X1 = 39, | ||
329 | SHRH_SPECIAL_0_OPCODE_X0 = 71, | ||
330 | SHRH_SPECIAL_0_OPCODE_X1 = 40, | ||
331 | SHRIB_SHUN_0_OPCODE_X0 = 5, | ||
332 | SHRIB_SHUN_0_OPCODE_X1 = 5, | ||
333 | SHRIH_SHUN_0_OPCODE_X0 = 6, | ||
334 | SHRIH_SHUN_0_OPCODE_X1 = 6, | ||
335 | SHRI_SHUN_0_OPCODE_X0 = 7, | ||
336 | SHRI_SHUN_0_OPCODE_X1 = 7, | ||
337 | SHRI_SHUN_0_OPCODE_Y0 = 3, | ||
338 | SHRI_SHUN_0_OPCODE_Y1 = 3, | ||
339 | SHR_SPECIAL_0_OPCODE_X0 = 72, | ||
340 | SHR_SPECIAL_0_OPCODE_X1 = 41, | ||
341 | SHR_SPECIAL_3_OPCODE_Y0 = 2, | ||
342 | SHR_SPECIAL_3_OPCODE_Y1 = 2, | ||
343 | SHUN_0_OPCODE_X0 = 7, | ||
344 | SHUN_0_OPCODE_X1 = 8, | ||
345 | SHUN_0_OPCODE_Y0 = 13, | ||
346 | SHUN_0_OPCODE_Y1 = 11, | ||
347 | SH_OPCODE_Y2 = 6, | ||
348 | SH_SPECIAL_0_OPCODE_X1 = 42, | ||
349 | SLTB_SPECIAL_0_OPCODE_X0 = 73, | ||
350 | SLTB_SPECIAL_0_OPCODE_X1 = 43, | ||
351 | SLTB_U_SPECIAL_0_OPCODE_X0 = 74, | ||
352 | SLTB_U_SPECIAL_0_OPCODE_X1 = 44, | ||
353 | SLTEB_SPECIAL_0_OPCODE_X0 = 75, | ||
354 | SLTEB_SPECIAL_0_OPCODE_X1 = 45, | ||
355 | SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, | ||
356 | SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, | ||
357 | SLTEH_SPECIAL_0_OPCODE_X0 = 77, | ||
358 | SLTEH_SPECIAL_0_OPCODE_X1 = 47, | ||
359 | SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, | ||
360 | SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, | ||
361 | SLTE_SPECIAL_0_OPCODE_X0 = 79, | ||
362 | SLTE_SPECIAL_0_OPCODE_X1 = 49, | ||
363 | SLTE_SPECIAL_4_OPCODE_Y0 = 0, | ||
364 | SLTE_SPECIAL_4_OPCODE_Y1 = 0, | ||
365 | SLTE_U_SPECIAL_0_OPCODE_X0 = 80, | ||
366 | SLTE_U_SPECIAL_0_OPCODE_X1 = 50, | ||
367 | SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, | ||
368 | SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, | ||
369 | SLTH_SPECIAL_0_OPCODE_X0 = 81, | ||
370 | SLTH_SPECIAL_0_OPCODE_X1 = 51, | ||
371 | SLTH_U_SPECIAL_0_OPCODE_X0 = 82, | ||
372 | SLTH_U_SPECIAL_0_OPCODE_X1 = 52, | ||
373 | SLTIB_IMM_0_OPCODE_X0 = 12, | ||
374 | SLTIB_IMM_0_OPCODE_X1 = 15, | ||
375 | SLTIB_U_IMM_0_OPCODE_X0 = 13, | ||
376 | SLTIB_U_IMM_0_OPCODE_X1 = 16, | ||
377 | SLTIH_IMM_0_OPCODE_X0 = 14, | ||
378 | SLTIH_IMM_0_OPCODE_X1 = 17, | ||
379 | SLTIH_U_IMM_0_OPCODE_X0 = 15, | ||
380 | SLTIH_U_IMM_0_OPCODE_X1 = 18, | ||
381 | SLTI_IMM_0_OPCODE_X0 = 16, | ||
382 | SLTI_IMM_0_OPCODE_X1 = 19, | ||
383 | SLTI_OPCODE_Y0 = 14, | ||
384 | SLTI_OPCODE_Y1 = 12, | ||
385 | SLTI_U_IMM_0_OPCODE_X0 = 17, | ||
386 | SLTI_U_IMM_0_OPCODE_X1 = 20, | ||
387 | SLTI_U_OPCODE_Y0 = 15, | ||
388 | SLTI_U_OPCODE_Y1 = 13, | ||
389 | SLT_SPECIAL_0_OPCODE_X0 = 83, | ||
390 | SLT_SPECIAL_0_OPCODE_X1 = 53, | ||
391 | SLT_SPECIAL_4_OPCODE_Y0 = 2, | ||
392 | SLT_SPECIAL_4_OPCODE_Y1 = 2, | ||
393 | SLT_U_SPECIAL_0_OPCODE_X0 = 84, | ||
394 | SLT_U_SPECIAL_0_OPCODE_X1 = 54, | ||
395 | SLT_U_SPECIAL_4_OPCODE_Y0 = 3, | ||
396 | SLT_U_SPECIAL_4_OPCODE_Y1 = 3, | ||
397 | SNEB_SPECIAL_0_OPCODE_X0 = 85, | ||
398 | SNEB_SPECIAL_0_OPCODE_X1 = 55, | ||
399 | SNEH_SPECIAL_0_OPCODE_X0 = 86, | ||
400 | SNEH_SPECIAL_0_OPCODE_X1 = 56, | ||
401 | SNE_SPECIAL_0_OPCODE_X0 = 87, | ||
402 | SNE_SPECIAL_0_OPCODE_X1 = 57, | ||
403 | SNE_SPECIAL_5_OPCODE_Y0 = 3, | ||
404 | SNE_SPECIAL_5_OPCODE_Y1 = 3, | ||
405 | SPECIAL_0_OPCODE_X0 = 0, | ||
406 | SPECIAL_0_OPCODE_X1 = 1, | ||
407 | SPECIAL_0_OPCODE_Y0 = 1, | ||
408 | SPECIAL_0_OPCODE_Y1 = 1, | ||
409 | SPECIAL_1_OPCODE_Y0 = 2, | ||
410 | SPECIAL_1_OPCODE_Y1 = 2, | ||
411 | SPECIAL_2_OPCODE_Y0 = 3, | ||
412 | SPECIAL_2_OPCODE_Y1 = 3, | ||
413 | SPECIAL_3_OPCODE_Y0 = 4, | ||
414 | SPECIAL_3_OPCODE_Y1 = 4, | ||
415 | SPECIAL_4_OPCODE_Y0 = 5, | ||
416 | SPECIAL_4_OPCODE_Y1 = 5, | ||
417 | SPECIAL_5_OPCODE_Y0 = 6, | ||
418 | SPECIAL_5_OPCODE_Y1 = 6, | ||
419 | SPECIAL_6_OPCODE_Y0 = 7, | ||
420 | SPECIAL_7_OPCODE_Y0 = 8, | ||
421 | SRAB_SPECIAL_0_OPCODE_X0 = 88, | ||
422 | SRAB_SPECIAL_0_OPCODE_X1 = 58, | ||
423 | SRAH_SPECIAL_0_OPCODE_X0 = 89, | ||
424 | SRAH_SPECIAL_0_OPCODE_X1 = 59, | ||
425 | SRAIB_SHUN_0_OPCODE_X0 = 8, | ||
426 | SRAIB_SHUN_0_OPCODE_X1 = 8, | ||
427 | SRAIH_SHUN_0_OPCODE_X0 = 9, | ||
428 | SRAIH_SHUN_0_OPCODE_X1 = 9, | ||
429 | SRAI_SHUN_0_OPCODE_X0 = 10, | ||
430 | SRAI_SHUN_0_OPCODE_X1 = 10, | ||
431 | SRAI_SHUN_0_OPCODE_Y0 = 4, | ||
432 | SRAI_SHUN_0_OPCODE_Y1 = 4, | ||
433 | SRA_SPECIAL_0_OPCODE_X0 = 90, | ||
434 | SRA_SPECIAL_0_OPCODE_X1 = 60, | ||
435 | SRA_SPECIAL_3_OPCODE_Y0 = 3, | ||
436 | SRA_SPECIAL_3_OPCODE_Y1 = 3, | ||
437 | SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, | ||
438 | SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, | ||
439 | SUBB_SPECIAL_0_OPCODE_X0 = 91, | ||
440 | SUBB_SPECIAL_0_OPCODE_X1 = 61, | ||
441 | SUBHS_SPECIAL_0_OPCODE_X0 = 101, | ||
442 | SUBHS_SPECIAL_0_OPCODE_X1 = 71, | ||
443 | SUBH_SPECIAL_0_OPCODE_X0 = 92, | ||
444 | SUBH_SPECIAL_0_OPCODE_X1 = 62, | ||
445 | SUBS_SPECIAL_0_OPCODE_X0 = 97, | ||
446 | SUBS_SPECIAL_0_OPCODE_X1 = 67, | ||
447 | SUB_SPECIAL_0_OPCODE_X0 = 93, | ||
448 | SUB_SPECIAL_0_OPCODE_X1 = 63, | ||
449 | SUB_SPECIAL_0_OPCODE_Y0 = 3, | ||
450 | SUB_SPECIAL_0_OPCODE_Y1 = 3, | ||
451 | SWADD_IMM_0_OPCODE_X1 = 30, | ||
452 | SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, | ||
453 | SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, | ||
454 | SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, | ||
455 | SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, | ||
456 | SW_OPCODE_Y2 = 7, | ||
457 | SW_SPECIAL_0_OPCODE_X1 = 64, | ||
458 | TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, | ||
459 | TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, | ||
460 | TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, | ||
461 | TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, | ||
462 | TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, | ||
463 | TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, | ||
464 | TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, | ||
465 | TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, | ||
466 | TNS_UN_0_SHUN_0_OPCODE_X1 = 22, | ||
467 | UN_0_SHUN_0_OPCODE_X0 = 11, | ||
468 | UN_0_SHUN_0_OPCODE_X1 = 11, | ||
469 | UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
470 | UN_0_SHUN_0_OPCODE_Y1 = 5, | ||
471 | WH64_UN_0_SHUN_0_OPCODE_X1 = 23, | ||
472 | XORI_IMM_0_OPCODE_X0 = 2, | ||
473 | XORI_IMM_0_OPCODE_X1 = 21, | ||
474 | XOR_SPECIAL_0_OPCODE_X0 = 94, | ||
475 | XOR_SPECIAL_0_OPCODE_X1 = 65, | ||
476 | XOR_SPECIAL_2_OPCODE_Y0 = 3, | ||
477 | XOR_SPECIAL_2_OPCODE_Y1 = 3 | ||
478 | }; | ||
479 | |||
480 | #endif /* !_TILE_OPCODE_CONSTANTS_H */ | ||
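As a quick illustration of how these generated constants are typically consumed, here is a minimal sketch (assuming this header is included, and assuming the sub-opcode value has already been extracted from an instruction bundle by the separate opcode description headers, which are not part of this file) that classifies a few of the X1 "UN_0_SHUN_0" entries:

    /* Illustrative sketch only: classify an already-extracted X1
     * UN_0_SHUN_0 sub-opcode against the constants above. */
    static const char *x1_un_shun0_name(unsigned int sub)
    {
            switch (sub) {
            case NOP_UN_0_SHUN_0_OPCODE_X1:    return "nop";
            case FNOP_UN_0_SHUN_0_OPCODE_X1:   return "fnop";
            case IRET_UN_0_SHUN_0_OPCODE_X1:   return "iret";
            case SWINT1_UN_0_SHUN_0_OPCODE_X1: return "swint1";
            default:                           return "other";
            }
    }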
diff --git a/arch/tile/include/asm/opcode_constants_64.h b/arch/tile/include/asm/opcode_constants_64.h new file mode 100644 index 000000000000..227d033b180c --- /dev/null +++ b/arch/tile/include/asm/opcode_constants_64.h | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | |||
17 | |||
18 | #ifndef _TILE_OPCODE_CONSTANTS_H | ||
19 | #define _TILE_OPCODE_CONSTANTS_H | ||
20 | enum | ||
21 | { | ||
22 | ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, | ||
23 | ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, | ||
24 | ADDB_SPECIAL_0_OPCODE_X0 = 1, | ||
25 | ADDB_SPECIAL_0_OPCODE_X1 = 1, | ||
26 | ADDHS_SPECIAL_0_OPCODE_X0 = 99, | ||
27 | ADDHS_SPECIAL_0_OPCODE_X1 = 69, | ||
28 | ADDH_SPECIAL_0_OPCODE_X0 = 2, | ||
29 | ADDH_SPECIAL_0_OPCODE_X1 = 2, | ||
30 | ADDIB_IMM_0_OPCODE_X0 = 1, | ||
31 | ADDIB_IMM_0_OPCODE_X1 = 1, | ||
32 | ADDIH_IMM_0_OPCODE_X0 = 2, | ||
33 | ADDIH_IMM_0_OPCODE_X1 = 2, | ||
34 | ADDI_IMM_0_OPCODE_X0 = 3, | ||
35 | ADDI_IMM_0_OPCODE_X1 = 3, | ||
36 | ADDI_IMM_1_OPCODE_SN = 1, | ||
37 | ADDI_OPCODE_Y0 = 9, | ||
38 | ADDI_OPCODE_Y1 = 7, | ||
39 | ADDLIS_OPCODE_X0 = 1, | ||
40 | ADDLIS_OPCODE_X1 = 2, | ||
41 | ADDLI_OPCODE_X0 = 2, | ||
42 | ADDLI_OPCODE_X1 = 3, | ||
43 | ADDS_SPECIAL_0_OPCODE_X0 = 96, | ||
44 | ADDS_SPECIAL_0_OPCODE_X1 = 66, | ||
45 | ADD_SPECIAL_0_OPCODE_X0 = 3, | ||
46 | ADD_SPECIAL_0_OPCODE_X1 = 3, | ||
47 | ADD_SPECIAL_0_OPCODE_Y0 = 0, | ||
48 | ADD_SPECIAL_0_OPCODE_Y1 = 0, | ||
49 | ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, | ||
50 | ADIFFH_SPECIAL_0_OPCODE_X0 = 5, | ||
51 | ANDI_IMM_0_OPCODE_X0 = 1, | ||
52 | ANDI_IMM_0_OPCODE_X1 = 4, | ||
53 | ANDI_OPCODE_Y0 = 10, | ||
54 | ANDI_OPCODE_Y1 = 8, | ||
55 | AND_SPECIAL_0_OPCODE_X0 = 6, | ||
56 | AND_SPECIAL_0_OPCODE_X1 = 4, | ||
57 | AND_SPECIAL_2_OPCODE_Y0 = 0, | ||
58 | AND_SPECIAL_2_OPCODE_Y1 = 0, | ||
59 | AULI_OPCODE_X0 = 3, | ||
60 | AULI_OPCODE_X1 = 4, | ||
61 | AVGB_U_SPECIAL_0_OPCODE_X0 = 7, | ||
62 | AVGH_SPECIAL_0_OPCODE_X0 = 8, | ||
63 | BBNST_BRANCH_OPCODE_X1 = 15, | ||
64 | BBNS_BRANCH_OPCODE_X1 = 14, | ||
65 | BBNS_OPCODE_SN = 63, | ||
66 | BBST_BRANCH_OPCODE_X1 = 13, | ||
67 | BBS_BRANCH_OPCODE_X1 = 12, | ||
68 | BBS_OPCODE_SN = 62, | ||
69 | BGEZT_BRANCH_OPCODE_X1 = 7, | ||
70 | BGEZ_BRANCH_OPCODE_X1 = 6, | ||
71 | BGEZ_OPCODE_SN = 61, | ||
72 | BGZT_BRANCH_OPCODE_X1 = 5, | ||
73 | BGZ_BRANCH_OPCODE_X1 = 4, | ||
74 | BGZ_OPCODE_SN = 58, | ||
75 | BITX_UN_0_SHUN_0_OPCODE_X0 = 1, | ||
76 | BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, | ||
77 | BLEZT_BRANCH_OPCODE_X1 = 11, | ||
78 | BLEZ_BRANCH_OPCODE_X1 = 10, | ||
79 | BLEZ_OPCODE_SN = 59, | ||
80 | BLZT_BRANCH_OPCODE_X1 = 9, | ||
81 | BLZ_BRANCH_OPCODE_X1 = 8, | ||
82 | BLZ_OPCODE_SN = 60, | ||
83 | BNZT_BRANCH_OPCODE_X1 = 3, | ||
84 | BNZ_BRANCH_OPCODE_X1 = 2, | ||
85 | BNZ_OPCODE_SN = 57, | ||
86 | BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, | ||
87 | BRANCH_OPCODE_X1 = 5, | ||
88 | BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, | ||
89 | BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, | ||
90 | BZT_BRANCH_OPCODE_X1 = 1, | ||
91 | BZ_BRANCH_OPCODE_X1 = 0, | ||
92 | BZ_OPCODE_SN = 56, | ||
93 | CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, | ||
94 | CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, | ||
95 | CRC32_32_SPECIAL_0_OPCODE_X0 = 9, | ||
96 | CRC32_8_SPECIAL_0_OPCODE_X0 = 10, | ||
97 | CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, | ||
98 | CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, | ||
99 | DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, | ||
100 | DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, | ||
101 | DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, | ||
102 | FINV_UN_0_SHUN_0_OPCODE_X1 = 3, | ||
103 | FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, | ||
104 | FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, | ||
105 | FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, | ||
106 | FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, | ||
107 | FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
108 | FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, | ||
109 | HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
110 | ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, | ||
111 | ILL_UN_0_SHUN_0_OPCODE_X1 = 7, | ||
112 | ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, | ||
113 | IMM_0_OPCODE_SN = 0, | ||
114 | IMM_0_OPCODE_X0 = 4, | ||
115 | IMM_0_OPCODE_X1 = 6, | ||
116 | IMM_1_OPCODE_SN = 1, | ||
117 | IMM_OPCODE_0_X0 = 5, | ||
118 | INTHB_SPECIAL_0_OPCODE_X0 = 11, | ||
119 | INTHB_SPECIAL_0_OPCODE_X1 = 5, | ||
120 | INTHH_SPECIAL_0_OPCODE_X0 = 12, | ||
121 | INTHH_SPECIAL_0_OPCODE_X1 = 6, | ||
122 | INTLB_SPECIAL_0_OPCODE_X0 = 13, | ||
123 | INTLB_SPECIAL_0_OPCODE_X1 = 7, | ||
124 | INTLH_SPECIAL_0_OPCODE_X0 = 14, | ||
125 | INTLH_SPECIAL_0_OPCODE_X1 = 8, | ||
126 | INV_UN_0_SHUN_0_OPCODE_X1 = 8, | ||
127 | IRET_UN_0_SHUN_0_OPCODE_X1 = 9, | ||
128 | JALB_OPCODE_X1 = 13, | ||
129 | JALF_OPCODE_X1 = 12, | ||
130 | JALRP_SPECIAL_0_OPCODE_X1 = 9, | ||
131 | JALRR_IMM_1_OPCODE_SN = 3, | ||
132 | JALR_RR_IMM_0_OPCODE_SN = 5, | ||
133 | JALR_SPECIAL_0_OPCODE_X1 = 10, | ||
134 | JB_OPCODE_X1 = 11, | ||
135 | JF_OPCODE_X1 = 10, | ||
136 | JRP_SPECIAL_0_OPCODE_X1 = 11, | ||
137 | JRR_IMM_1_OPCODE_SN = 2, | ||
138 | JR_RR_IMM_0_OPCODE_SN = 4, | ||
139 | JR_SPECIAL_0_OPCODE_X1 = 12, | ||
140 | LBADD_IMM_0_OPCODE_X1 = 22, | ||
141 | LBADD_U_IMM_0_OPCODE_X1 = 23, | ||
142 | LB_OPCODE_Y2 = 0, | ||
143 | LB_UN_0_SHUN_0_OPCODE_X1 = 10, | ||
144 | LB_U_OPCODE_Y2 = 1, | ||
145 | LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, | ||
146 | LHADD_IMM_0_OPCODE_X1 = 24, | ||
147 | LHADD_U_IMM_0_OPCODE_X1 = 25, | ||
148 | LH_OPCODE_Y2 = 2, | ||
149 | LH_UN_0_SHUN_0_OPCODE_X1 = 12, | ||
150 | LH_U_OPCODE_Y2 = 3, | ||
151 | LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, | ||
152 | LNK_SPECIAL_0_OPCODE_X1 = 13, | ||
153 | LWADD_IMM_0_OPCODE_X1 = 26, | ||
154 | LWADD_NA_IMM_0_OPCODE_X1 = 27, | ||
155 | LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, | ||
156 | LW_OPCODE_Y2 = 4, | ||
157 | LW_UN_0_SHUN_0_OPCODE_X1 = 14, | ||
158 | MAXB_U_SPECIAL_0_OPCODE_X0 = 15, | ||
159 | MAXB_U_SPECIAL_0_OPCODE_X1 = 14, | ||
160 | MAXH_SPECIAL_0_OPCODE_X0 = 16, | ||
161 | MAXH_SPECIAL_0_OPCODE_X1 = 15, | ||
162 | MAXIB_U_IMM_0_OPCODE_X0 = 4, | ||
163 | MAXIB_U_IMM_0_OPCODE_X1 = 5, | ||
164 | MAXIH_IMM_0_OPCODE_X0 = 5, | ||
165 | MAXIH_IMM_0_OPCODE_X1 = 6, | ||
166 | MFSPR_IMM_0_OPCODE_X1 = 7, | ||
167 | MF_UN_0_SHUN_0_OPCODE_X1 = 15, | ||
168 | MINB_U_SPECIAL_0_OPCODE_X0 = 17, | ||
169 | MINB_U_SPECIAL_0_OPCODE_X1 = 16, | ||
170 | MINH_SPECIAL_0_OPCODE_X0 = 18, | ||
171 | MINH_SPECIAL_0_OPCODE_X1 = 17, | ||
172 | MINIB_U_IMM_0_OPCODE_X0 = 6, | ||
173 | MINIB_U_IMM_0_OPCODE_X1 = 8, | ||
174 | MINIH_IMM_0_OPCODE_X0 = 7, | ||
175 | MINIH_IMM_0_OPCODE_X1 = 9, | ||
176 | MM_OPCODE_X0 = 6, | ||
177 | MM_OPCODE_X1 = 7, | ||
178 | MNZB_SPECIAL_0_OPCODE_X0 = 19, | ||
179 | MNZB_SPECIAL_0_OPCODE_X1 = 18, | ||
180 | MNZH_SPECIAL_0_OPCODE_X0 = 20, | ||
181 | MNZH_SPECIAL_0_OPCODE_X1 = 19, | ||
182 | MNZ_SPECIAL_0_OPCODE_X0 = 21, | ||
183 | MNZ_SPECIAL_0_OPCODE_X1 = 20, | ||
184 | MNZ_SPECIAL_1_OPCODE_Y0 = 0, | ||
185 | MNZ_SPECIAL_1_OPCODE_Y1 = 1, | ||
186 | MOVEI_IMM_1_OPCODE_SN = 0, | ||
187 | MOVE_RR_IMM_0_OPCODE_SN = 8, | ||
188 | MTSPR_IMM_0_OPCODE_X1 = 10, | ||
189 | MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, | ||
190 | MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, | ||
191 | MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, | ||
192 | MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, | ||
193 | MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, | ||
194 | MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, | ||
195 | MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, | ||
196 | MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, | ||
197 | MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, | ||
198 | MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, | ||
199 | MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, | ||
200 | MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, | ||
201 | MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, | ||
202 | MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, | ||
203 | MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, | ||
204 | MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, | ||
205 | MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, | ||
206 | MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, | ||
207 | MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, | ||
208 | MULHL_US_SPECIAL_0_OPCODE_X0 = 36, | ||
209 | MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, | ||
210 | MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, | ||
211 | MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, | ||
212 | MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, | ||
213 | MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, | ||
214 | MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, | ||
215 | MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, | ||
216 | MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, | ||
217 | MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, | ||
218 | MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, | ||
219 | MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, | ||
220 | MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, | ||
221 | MVNZ_SPECIAL_0_OPCODE_X0 = 45, | ||
222 | MVNZ_SPECIAL_1_OPCODE_Y0 = 1, | ||
223 | MVZ_SPECIAL_0_OPCODE_X0 = 46, | ||
224 | MVZ_SPECIAL_1_OPCODE_Y0 = 2, | ||
225 | MZB_SPECIAL_0_OPCODE_X0 = 47, | ||
226 | MZB_SPECIAL_0_OPCODE_X1 = 21, | ||
227 | MZH_SPECIAL_0_OPCODE_X0 = 48, | ||
228 | MZH_SPECIAL_0_OPCODE_X1 = 22, | ||
229 | MZ_SPECIAL_0_OPCODE_X0 = 49, | ||
230 | MZ_SPECIAL_0_OPCODE_X1 = 23, | ||
231 | MZ_SPECIAL_1_OPCODE_Y0 = 3, | ||
232 | MZ_SPECIAL_1_OPCODE_Y1 = 2, | ||
233 | NAP_UN_0_SHUN_0_OPCODE_X1 = 16, | ||
234 | NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, | ||
235 | NOP_UN_0_SHUN_0_OPCODE_X0 = 6, | ||
236 | NOP_UN_0_SHUN_0_OPCODE_X1 = 17, | ||
237 | NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, | ||
238 | NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, | ||
239 | NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
240 | NOR_SPECIAL_0_OPCODE_X0 = 50, | ||
241 | NOR_SPECIAL_0_OPCODE_X1 = 24, | ||
242 | NOR_SPECIAL_2_OPCODE_Y0 = 1, | ||
243 | NOR_SPECIAL_2_OPCODE_Y1 = 1, | ||
244 | ORI_IMM_0_OPCODE_X0 = 8, | ||
245 | ORI_IMM_0_OPCODE_X1 = 11, | ||
246 | ORI_OPCODE_Y0 = 11, | ||
247 | ORI_OPCODE_Y1 = 9, | ||
248 | OR_SPECIAL_0_OPCODE_X0 = 51, | ||
249 | OR_SPECIAL_0_OPCODE_X1 = 25, | ||
250 | OR_SPECIAL_2_OPCODE_Y0 = 2, | ||
251 | OR_SPECIAL_2_OPCODE_Y1 = 2, | ||
252 | PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, | ||
253 | PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, | ||
254 | PACKHB_SPECIAL_0_OPCODE_X0 = 52, | ||
255 | PACKHB_SPECIAL_0_OPCODE_X1 = 26, | ||
256 | PACKHS_SPECIAL_0_OPCODE_X0 = 102, | ||
257 | PACKHS_SPECIAL_0_OPCODE_X1 = 72, | ||
258 | PACKLB_SPECIAL_0_OPCODE_X0 = 53, | ||
259 | PACKLB_SPECIAL_0_OPCODE_X1 = 27, | ||
260 | PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, | ||
261 | PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, | ||
262 | RLI_SHUN_0_OPCODE_X0 = 1, | ||
263 | RLI_SHUN_0_OPCODE_X1 = 1, | ||
264 | RLI_SHUN_0_OPCODE_Y0 = 1, | ||
265 | RLI_SHUN_0_OPCODE_Y1 = 1, | ||
266 | RL_SPECIAL_0_OPCODE_X0 = 54, | ||
267 | RL_SPECIAL_0_OPCODE_X1 = 28, | ||
268 | RL_SPECIAL_3_OPCODE_Y0 = 0, | ||
269 | RL_SPECIAL_3_OPCODE_Y1 = 0, | ||
270 | RR_IMM_0_OPCODE_SN = 0, | ||
271 | S1A_SPECIAL_0_OPCODE_X0 = 55, | ||
272 | S1A_SPECIAL_0_OPCODE_X1 = 29, | ||
273 | S1A_SPECIAL_0_OPCODE_Y0 = 1, | ||
274 | S1A_SPECIAL_0_OPCODE_Y1 = 1, | ||
275 | S2A_SPECIAL_0_OPCODE_X0 = 56, | ||
276 | S2A_SPECIAL_0_OPCODE_X1 = 30, | ||
277 | S2A_SPECIAL_0_OPCODE_Y0 = 2, | ||
278 | S2A_SPECIAL_0_OPCODE_Y1 = 2, | ||
279 | S3A_SPECIAL_0_OPCODE_X0 = 57, | ||
280 | S3A_SPECIAL_0_OPCODE_X1 = 31, | ||
281 | S3A_SPECIAL_5_OPCODE_Y0 = 1, | ||
282 | S3A_SPECIAL_5_OPCODE_Y1 = 1, | ||
283 | SADAB_U_SPECIAL_0_OPCODE_X0 = 58, | ||
284 | SADAH_SPECIAL_0_OPCODE_X0 = 59, | ||
285 | SADAH_U_SPECIAL_0_OPCODE_X0 = 60, | ||
286 | SADB_U_SPECIAL_0_OPCODE_X0 = 61, | ||
287 | SADH_SPECIAL_0_OPCODE_X0 = 62, | ||
288 | SADH_U_SPECIAL_0_OPCODE_X0 = 63, | ||
289 | SBADD_IMM_0_OPCODE_X1 = 28, | ||
290 | SB_OPCODE_Y2 = 5, | ||
291 | SB_SPECIAL_0_OPCODE_X1 = 32, | ||
292 | SEQB_SPECIAL_0_OPCODE_X0 = 64, | ||
293 | SEQB_SPECIAL_0_OPCODE_X1 = 33, | ||
294 | SEQH_SPECIAL_0_OPCODE_X0 = 65, | ||
295 | SEQH_SPECIAL_0_OPCODE_X1 = 34, | ||
296 | SEQIB_IMM_0_OPCODE_X0 = 9, | ||
297 | SEQIB_IMM_0_OPCODE_X1 = 12, | ||
298 | SEQIH_IMM_0_OPCODE_X0 = 10, | ||
299 | SEQIH_IMM_0_OPCODE_X1 = 13, | ||
300 | SEQI_IMM_0_OPCODE_X0 = 11, | ||
301 | SEQI_IMM_0_OPCODE_X1 = 14, | ||
302 | SEQI_OPCODE_Y0 = 12, | ||
303 | SEQI_OPCODE_Y1 = 10, | ||
304 | SEQ_SPECIAL_0_OPCODE_X0 = 66, | ||
305 | SEQ_SPECIAL_0_OPCODE_X1 = 35, | ||
306 | SEQ_SPECIAL_5_OPCODE_Y0 = 2, | ||
307 | SEQ_SPECIAL_5_OPCODE_Y1 = 2, | ||
308 | SHADD_IMM_0_OPCODE_X1 = 29, | ||
309 | SHL8II_IMM_0_OPCODE_SN = 3, | ||
310 | SHLB_SPECIAL_0_OPCODE_X0 = 67, | ||
311 | SHLB_SPECIAL_0_OPCODE_X1 = 36, | ||
312 | SHLH_SPECIAL_0_OPCODE_X0 = 68, | ||
313 | SHLH_SPECIAL_0_OPCODE_X1 = 37, | ||
314 | SHLIB_SHUN_0_OPCODE_X0 = 2, | ||
315 | SHLIB_SHUN_0_OPCODE_X1 = 2, | ||
316 | SHLIH_SHUN_0_OPCODE_X0 = 3, | ||
317 | SHLIH_SHUN_0_OPCODE_X1 = 3, | ||
318 | SHLI_SHUN_0_OPCODE_X0 = 4, | ||
319 | SHLI_SHUN_0_OPCODE_X1 = 4, | ||
320 | SHLI_SHUN_0_OPCODE_Y0 = 2, | ||
321 | SHLI_SHUN_0_OPCODE_Y1 = 2, | ||
322 | SHL_SPECIAL_0_OPCODE_X0 = 69, | ||
323 | SHL_SPECIAL_0_OPCODE_X1 = 38, | ||
324 | SHL_SPECIAL_3_OPCODE_Y0 = 1, | ||
325 | SHL_SPECIAL_3_OPCODE_Y1 = 1, | ||
326 | SHR1_RR_IMM_0_OPCODE_SN = 9, | ||
327 | SHRB_SPECIAL_0_OPCODE_X0 = 70, | ||
328 | SHRB_SPECIAL_0_OPCODE_X1 = 39, | ||
329 | SHRH_SPECIAL_0_OPCODE_X0 = 71, | ||
330 | SHRH_SPECIAL_0_OPCODE_X1 = 40, | ||
331 | SHRIB_SHUN_0_OPCODE_X0 = 5, | ||
332 | SHRIB_SHUN_0_OPCODE_X1 = 5, | ||
333 | SHRIH_SHUN_0_OPCODE_X0 = 6, | ||
334 | SHRIH_SHUN_0_OPCODE_X1 = 6, | ||
335 | SHRI_SHUN_0_OPCODE_X0 = 7, | ||
336 | SHRI_SHUN_0_OPCODE_X1 = 7, | ||
337 | SHRI_SHUN_0_OPCODE_Y0 = 3, | ||
338 | SHRI_SHUN_0_OPCODE_Y1 = 3, | ||
339 | SHR_SPECIAL_0_OPCODE_X0 = 72, | ||
340 | SHR_SPECIAL_0_OPCODE_X1 = 41, | ||
341 | SHR_SPECIAL_3_OPCODE_Y0 = 2, | ||
342 | SHR_SPECIAL_3_OPCODE_Y1 = 2, | ||
343 | SHUN_0_OPCODE_X0 = 7, | ||
344 | SHUN_0_OPCODE_X1 = 8, | ||
345 | SHUN_0_OPCODE_Y0 = 13, | ||
346 | SHUN_0_OPCODE_Y1 = 11, | ||
347 | SH_OPCODE_Y2 = 6, | ||
348 | SH_SPECIAL_0_OPCODE_X1 = 42, | ||
349 | SLTB_SPECIAL_0_OPCODE_X0 = 73, | ||
350 | SLTB_SPECIAL_0_OPCODE_X1 = 43, | ||
351 | SLTB_U_SPECIAL_0_OPCODE_X0 = 74, | ||
352 | SLTB_U_SPECIAL_0_OPCODE_X1 = 44, | ||
353 | SLTEB_SPECIAL_0_OPCODE_X0 = 75, | ||
354 | SLTEB_SPECIAL_0_OPCODE_X1 = 45, | ||
355 | SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, | ||
356 | SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, | ||
357 | SLTEH_SPECIAL_0_OPCODE_X0 = 77, | ||
358 | SLTEH_SPECIAL_0_OPCODE_X1 = 47, | ||
359 | SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, | ||
360 | SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, | ||
361 | SLTE_SPECIAL_0_OPCODE_X0 = 79, | ||
362 | SLTE_SPECIAL_0_OPCODE_X1 = 49, | ||
363 | SLTE_SPECIAL_4_OPCODE_Y0 = 0, | ||
364 | SLTE_SPECIAL_4_OPCODE_Y1 = 0, | ||
365 | SLTE_U_SPECIAL_0_OPCODE_X0 = 80, | ||
366 | SLTE_U_SPECIAL_0_OPCODE_X1 = 50, | ||
367 | SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, | ||
368 | SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, | ||
369 | SLTH_SPECIAL_0_OPCODE_X0 = 81, | ||
370 | SLTH_SPECIAL_0_OPCODE_X1 = 51, | ||
371 | SLTH_U_SPECIAL_0_OPCODE_X0 = 82, | ||
372 | SLTH_U_SPECIAL_0_OPCODE_X1 = 52, | ||
373 | SLTIB_IMM_0_OPCODE_X0 = 12, | ||
374 | SLTIB_IMM_0_OPCODE_X1 = 15, | ||
375 | SLTIB_U_IMM_0_OPCODE_X0 = 13, | ||
376 | SLTIB_U_IMM_0_OPCODE_X1 = 16, | ||
377 | SLTIH_IMM_0_OPCODE_X0 = 14, | ||
378 | SLTIH_IMM_0_OPCODE_X1 = 17, | ||
379 | SLTIH_U_IMM_0_OPCODE_X0 = 15, | ||
380 | SLTIH_U_IMM_0_OPCODE_X1 = 18, | ||
381 | SLTI_IMM_0_OPCODE_X0 = 16, | ||
382 | SLTI_IMM_0_OPCODE_X1 = 19, | ||
383 | SLTI_OPCODE_Y0 = 14, | ||
384 | SLTI_OPCODE_Y1 = 12, | ||
385 | SLTI_U_IMM_0_OPCODE_X0 = 17, | ||
386 | SLTI_U_IMM_0_OPCODE_X1 = 20, | ||
387 | SLTI_U_OPCODE_Y0 = 15, | ||
388 | SLTI_U_OPCODE_Y1 = 13, | ||
389 | SLT_SPECIAL_0_OPCODE_X0 = 83, | ||
390 | SLT_SPECIAL_0_OPCODE_X1 = 53, | ||
391 | SLT_SPECIAL_4_OPCODE_Y0 = 2, | ||
392 | SLT_SPECIAL_4_OPCODE_Y1 = 2, | ||
393 | SLT_U_SPECIAL_0_OPCODE_X0 = 84, | ||
394 | SLT_U_SPECIAL_0_OPCODE_X1 = 54, | ||
395 | SLT_U_SPECIAL_4_OPCODE_Y0 = 3, | ||
396 | SLT_U_SPECIAL_4_OPCODE_Y1 = 3, | ||
397 | SNEB_SPECIAL_0_OPCODE_X0 = 85, | ||
398 | SNEB_SPECIAL_0_OPCODE_X1 = 55, | ||
399 | SNEH_SPECIAL_0_OPCODE_X0 = 86, | ||
400 | SNEH_SPECIAL_0_OPCODE_X1 = 56, | ||
401 | SNE_SPECIAL_0_OPCODE_X0 = 87, | ||
402 | SNE_SPECIAL_0_OPCODE_X1 = 57, | ||
403 | SNE_SPECIAL_5_OPCODE_Y0 = 3, | ||
404 | SNE_SPECIAL_5_OPCODE_Y1 = 3, | ||
405 | SPECIAL_0_OPCODE_X0 = 0, | ||
406 | SPECIAL_0_OPCODE_X1 = 1, | ||
407 | SPECIAL_0_OPCODE_Y0 = 1, | ||
408 | SPECIAL_0_OPCODE_Y1 = 1, | ||
409 | SPECIAL_1_OPCODE_Y0 = 2, | ||
410 | SPECIAL_1_OPCODE_Y1 = 2, | ||
411 | SPECIAL_2_OPCODE_Y0 = 3, | ||
412 | SPECIAL_2_OPCODE_Y1 = 3, | ||
413 | SPECIAL_3_OPCODE_Y0 = 4, | ||
414 | SPECIAL_3_OPCODE_Y1 = 4, | ||
415 | SPECIAL_4_OPCODE_Y0 = 5, | ||
416 | SPECIAL_4_OPCODE_Y1 = 5, | ||
417 | SPECIAL_5_OPCODE_Y0 = 6, | ||
418 | SPECIAL_5_OPCODE_Y1 = 6, | ||
419 | SPECIAL_6_OPCODE_Y0 = 7, | ||
420 | SPECIAL_7_OPCODE_Y0 = 8, | ||
421 | SRAB_SPECIAL_0_OPCODE_X0 = 88, | ||
422 | SRAB_SPECIAL_0_OPCODE_X1 = 58, | ||
423 | SRAH_SPECIAL_0_OPCODE_X0 = 89, | ||
424 | SRAH_SPECIAL_0_OPCODE_X1 = 59, | ||
425 | SRAIB_SHUN_0_OPCODE_X0 = 8, | ||
426 | SRAIB_SHUN_0_OPCODE_X1 = 8, | ||
427 | SRAIH_SHUN_0_OPCODE_X0 = 9, | ||
428 | SRAIH_SHUN_0_OPCODE_X1 = 9, | ||
429 | SRAI_SHUN_0_OPCODE_X0 = 10, | ||
430 | SRAI_SHUN_0_OPCODE_X1 = 10, | ||
431 | SRAI_SHUN_0_OPCODE_Y0 = 4, | ||
432 | SRAI_SHUN_0_OPCODE_Y1 = 4, | ||
433 | SRA_SPECIAL_0_OPCODE_X0 = 90, | ||
434 | SRA_SPECIAL_0_OPCODE_X1 = 60, | ||
435 | SRA_SPECIAL_3_OPCODE_Y0 = 3, | ||
436 | SRA_SPECIAL_3_OPCODE_Y1 = 3, | ||
437 | SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, | ||
438 | SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, | ||
439 | SUBB_SPECIAL_0_OPCODE_X0 = 91, | ||
440 | SUBB_SPECIAL_0_OPCODE_X1 = 61, | ||
441 | SUBHS_SPECIAL_0_OPCODE_X0 = 101, | ||
442 | SUBHS_SPECIAL_0_OPCODE_X1 = 71, | ||
443 | SUBH_SPECIAL_0_OPCODE_X0 = 92, | ||
444 | SUBH_SPECIAL_0_OPCODE_X1 = 62, | ||
445 | SUBS_SPECIAL_0_OPCODE_X0 = 97, | ||
446 | SUBS_SPECIAL_0_OPCODE_X1 = 67, | ||
447 | SUB_SPECIAL_0_OPCODE_X0 = 93, | ||
448 | SUB_SPECIAL_0_OPCODE_X1 = 63, | ||
449 | SUB_SPECIAL_0_OPCODE_Y0 = 3, | ||
450 | SUB_SPECIAL_0_OPCODE_Y1 = 3, | ||
451 | SWADD_IMM_0_OPCODE_X1 = 30, | ||
452 | SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, | ||
453 | SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, | ||
454 | SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, | ||
455 | SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, | ||
456 | SW_OPCODE_Y2 = 7, | ||
457 | SW_SPECIAL_0_OPCODE_X1 = 64, | ||
458 | TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, | ||
459 | TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, | ||
460 | TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, | ||
461 | TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, | ||
462 | TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, | ||
463 | TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, | ||
464 | TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, | ||
465 | TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, | ||
466 | TNS_UN_0_SHUN_0_OPCODE_X1 = 22, | ||
467 | UN_0_SHUN_0_OPCODE_X0 = 11, | ||
468 | UN_0_SHUN_0_OPCODE_X1 = 11, | ||
469 | UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
470 | UN_0_SHUN_0_OPCODE_Y1 = 5, | ||
471 | WH64_UN_0_SHUN_0_OPCODE_X1 = 23, | ||
472 | XORI_IMM_0_OPCODE_X0 = 2, | ||
473 | XORI_IMM_0_OPCODE_X1 = 21, | ||
474 | XOR_SPECIAL_0_OPCODE_X0 = 94, | ||
475 | XOR_SPECIAL_0_OPCODE_X1 = 65, | ||
476 | XOR_SPECIAL_2_OPCODE_Y0 = 3, | ||
477 | XOR_SPECIAL_2_OPCODE_Y1 = 3 | ||
478 | }; | ||
479 | |||
480 | #endif /* !_TILE_OPCODE_CONSTANTS_H */ | ||
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h new file mode 100644 index 000000000000..c8301c43d6d9 --- /dev/null +++ b/arch/tile/include/asm/page.h | |||
@@ -0,0 +1,334 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PAGE_H | ||
16 | #define _ASM_TILE_PAGE_H | ||
17 | |||
18 | #include <linux/const.h> | ||
19 | #include <hv/hypervisor.h> | ||
20 | #include <arch/chip.h> | ||
21 | |||
22 | /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */ | ||
23 | #define PAGE_SHIFT 16 | ||
24 | #define HPAGE_SHIFT 24 | ||
25 | |||
26 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) | ||
27 | #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) | ||
28 | |||
29 | #define PAGE_MASK (~(PAGE_SIZE - 1)) | ||
30 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
31 | |||
32 | /* | ||
33 | * The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx | ||
34 | * definitions in <hv/hypervisor.h>. We validate this at build time | ||
35 | * here, and again at runtime during early boot. We provide a | ||
36 | * separate definition since userspace doesn't have <hv/hypervisor.h>. | ||
37 | * | ||
38 | * Be careful to distinguish PAGE_SHIFT from HV_PTE_INDEX_PFN, since | ||
39 | * they are the same on i386 but not TILE. | ||
40 | */ | ||
41 | #if HV_LOG2_PAGE_SIZE_SMALL != PAGE_SHIFT | ||
42 | # error Small page size mismatch in Linux | ||
43 | #endif | ||
44 | #if HV_LOG2_PAGE_SIZE_LARGE != HPAGE_SHIFT | ||
45 | # error Huge page size mismatch in Linux | ||
46 | #endif | ||
47 | |||
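For reference, a small standalone sketch (not part of the header) of the arithmetic these definitions imply: a PAGE_SHIFT of 16 gives 64KB pages, an HPAGE_SHIFT of 24 gives 16MB huge pages, so one huge page covers 256 small pages.

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size  = 1UL << 16;   /* PAGE_SHIFT  */
            unsigned long hpage_size = 1UL << 24;   /* HPAGE_SHIFT */

            printf("PAGE_SIZE  = %lu (64KB)\n", page_size);
            printf("HPAGE_SIZE = %lu (16MB)\n", hpage_size);
            printf("small pages per huge page = %lu\n", hpage_size / page_size);
            return 0;
    }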
48 | #ifndef __ASSEMBLY__ | ||
49 | |||
50 | #include <linux/types.h> | ||
51 | #include <linux/string.h> | ||
52 | |||
53 | struct page; | ||
54 | |||
55 | static inline void clear_page(void *page) | ||
56 | { | ||
57 | memset(page, 0, PAGE_SIZE); | ||
58 | } | ||
59 | |||
60 | static inline void copy_page(void *to, void *from) | ||
61 | { | ||
62 | memcpy(to, from, PAGE_SIZE); | ||
63 | } | ||
64 | |||
65 | static inline void clear_user_page(void *page, unsigned long vaddr, | ||
66 | struct page *pg) | ||
67 | { | ||
68 | clear_page(page); | ||
69 | } | ||
70 | |||
71 | static inline void copy_user_page(void *to, void *from, unsigned long vaddr, | ||
72 | struct page *topage) | ||
73 | { | ||
74 | copy_page(to, from); | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * Hypervisor page tables at every level use the same basic HV_PTE structure. | ||
79 | */ | ||
80 | |||
81 | typedef __u64 pteval_t; | ||
82 | typedef __u64 pmdval_t; | ||
83 | typedef __u64 pudval_t; | ||
84 | typedef __u64 pgdval_t; | ||
85 | typedef __u64 pgprotval_t; | ||
86 | |||
87 | typedef HV_PTE pte_t; | ||
88 | typedef HV_PTE pgd_t; | ||
89 | typedef HV_PTE pgprot_t; | ||
90 | |||
91 | /* | ||
92 | * User L2 page tables are managed as one L2 page table per page, | ||
93 | * because we use the page allocator for them. This keeps the allocation | ||
94 | * simple and makes it potentially useful to implement HIGHPTE at some point. | ||
95 | * However, it's also inefficient, since L2 page tables are much smaller | ||
96 | * than pages (currently 2KB vs 64KB). So we should revisit this. | ||
97 | */ | ||
98 | typedef struct page *pgtable_t; | ||
99 | |||
100 | /* Must be a macro since it is used to create constants. */ | ||
101 | #define __pgprot(val) hv_pte(val) | ||
102 | |||
103 | static inline u64 pgprot_val(pgprot_t pgprot) | ||
104 | { | ||
105 | return hv_pte_val(pgprot); | ||
106 | } | ||
107 | |||
108 | static inline u64 pte_val(pte_t pte) | ||
109 | { | ||
110 | return hv_pte_val(pte); | ||
111 | } | ||
112 | |||
113 | static inline u64 pgd_val(pgd_t pgd) | ||
114 | { | ||
115 | return hv_pte_val(pgd); | ||
116 | } | ||
117 | |||
118 | #ifdef __tilegx__ | ||
119 | |||
120 | typedef HV_PTE pmd_t; | ||
121 | |||
122 | static inline u64 pmd_val(pmd_t pmd) | ||
123 | { | ||
124 | return hv_pte_val(pmd); | ||
125 | } | ||
126 | |||
127 | #endif | ||
128 | |||
129 | #endif /* !__ASSEMBLY__ */ | ||
130 | |||
131 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
132 | |||
133 | #define HUGE_MAX_HSTATE 2 | ||
134 | |||
135 | #ifdef CONFIG_HUGETLB_PAGE | ||
136 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
137 | #endif | ||
138 | |||
139 | /* Each memory controller has PAs distinct in their high bits. */ | ||
140 | #define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS()) | ||
141 | #define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS()) | ||
142 | #define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT) | ||
143 | #define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT)) | ||
144 | |||
145 | #ifdef __tilegx__ | ||
146 | |||
147 | /* | ||
148 | * We reserve the lower half of memory for user-space programs, and the | ||
149 | * upper half for system code. We re-map all of physical memory in the | ||
150 | * upper half, which takes a quarter of our VA space. Then we have | ||
151 | * the vmalloc regions. The supervisor code lives at 0xfffffff700000000, | ||
152 | * with the hypervisor above that. | ||
153 | * | ||
154 | * Loadable kernel modules are placed immediately after the static | ||
155 | * supervisor code, with each being allocated a 256MB region of | ||
156 | * address space, so we don't have to worry about the range of "jal" | ||
157 | * and other branch instructions. | ||
158 | * | ||
159 | * For now we keep life simple and just allocate one pmd (4GB) for vmalloc. | ||
160 | * Similarly, for now we don't play any struct page mapping games. | ||
161 | */ | ||
162 | |||
163 | #if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH() | ||
164 | # error Too much PA to map with the VA available! | ||
165 | #endif | ||
166 | #define HALF_VA_SPACE (_AC(1, UL) << (CHIP_VA_WIDTH() - 1)) | ||
167 | |||
168 | #define MEM_LOW_END (HALF_VA_SPACE - 1) /* low half */ | ||
169 | #define MEM_HIGH_START (-HALF_VA_SPACE) /* high half */ | ||
170 | #define PAGE_OFFSET MEM_HIGH_START | ||
171 | #define _VMALLOC_START _AC(0xfffffff500000000, UL) /* 4 GB */ | ||
172 | #define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL) /* 4 GB */ | ||
173 | #define MEM_SV_START _AC(0xfffffff700000000, UL) /* 256 MB */ | ||
174 | #define MEM_SV_INTRPT MEM_SV_START | ||
175 | #define MEM_MODULE_START _AC(0xfffffff710000000, UL) /* 256 MB */ | ||
176 | #define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024)) | ||
177 | #define MEM_HV_START _AC(0xfffffff800000000, UL) /* 32 GB */ | ||
178 | |||
179 | /* Highest DTLB address we will use */ | ||
180 | #define KERNEL_HIGH_VADDR MEM_SV_START | ||
181 | |||
182 | /* Since we don't currently provide any fixmaps, we use an impossible VA. */ | ||
183 | #define FIXADDR_TOP MEM_HV_START | ||
184 | |||
185 | #else /* !__tilegx__ */ | ||
186 | |||
187 | /* | ||
188 | * A PAGE_OFFSET of 0xC0000000 means that the kernel has | ||
189 | * a virtual address space of one gigabyte, which limits the | ||
190 | * amount of physical memory you can use to about 768MB. | ||
191 | * If you want more physical memory than this then see the CONFIG_HIGHMEM | ||
192 | * option in the kernel configuration. | ||
193 | * | ||
194 | * The top two 16MB chunks in the table below (VIRT and HV) are | ||
195 | * unavailable to Linux. Since the kernel interrupt vectors must live | ||
196 | * at 0xfd000000, we map all of the bottom of RAM at this address with | ||
197 | * a huge page table entry to minimize its ITLB footprint (as well as | ||
198 | * at PAGE_OFFSET). The last architected requirement is that user | ||
199 | * interrupt vectors live at 0xfc000000, so we make that range of | ||
200 | * memory available to user processes. The remaining regions are sized | ||
201 | * as shown; after the first four addresses, we show "typical" values, | ||
202 | * since the actual addresses depend on kernel #defines. | ||
203 | * | ||
204 | * MEM_VIRT_INTRPT 0xff000000 | ||
205 | * MEM_HV_INTRPT 0xfe000000 | ||
206 | * MEM_SV_INTRPT (kernel code) 0xfd000000 | ||
207 | * MEM_USER_INTRPT (user vector) 0xfc000000 | ||
208 | * FIX_KMAP_xxx 0xf8000000 (via NR_CPUS * KM_TYPE_NR) | ||
209 | * PKMAP_BASE 0xf7000000 (via LAST_PKMAP) | ||
210 | * HUGE_VMAP 0xf3000000 (via CONFIG_NR_HUGE_VMAPS) | ||
211 | * VMALLOC_START 0xf0000000 (via __VMALLOC_RESERVE) | ||
212 | * mapped LOWMEM 0xc0000000 | ||
213 | */ | ||
214 | |||
215 | #define MEM_USER_INTRPT _AC(0xfc000000, UL) | ||
216 | #define MEM_SV_INTRPT _AC(0xfd000000, UL) | ||
217 | #define MEM_HV_INTRPT _AC(0xfe000000, UL) | ||
218 | #define MEM_VIRT_INTRPT _AC(0xff000000, UL) | ||
219 | |||
220 | #define INTRPT_SIZE 0x4000 | ||
221 | |||
222 | /* Tolerate page size larger than the architecture interrupt region size. */ | ||
223 | #if PAGE_SIZE > INTRPT_SIZE | ||
224 | #undef INTRPT_SIZE | ||
225 | #define INTRPT_SIZE PAGE_SIZE | ||
226 | #endif | ||
227 | |||
228 | #define KERNEL_HIGH_VADDR MEM_USER_INTRPT | ||
229 | #define FIXADDR_TOP (KERNEL_HIGH_VADDR - PAGE_SIZE) | ||
230 | |||
231 | #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) | ||
232 | |||
233 | /* On 32-bit architectures we mix kernel modules in with other vmaps. */ | ||
234 | #define MEM_MODULE_START VMALLOC_START | ||
235 | #define MEM_MODULE_END VMALLOC_END | ||
236 | |||
237 | #endif /* __tilegx__ */ | ||
238 | |||
239 | #ifndef __ASSEMBLY__ | ||
240 | |||
241 | #ifdef CONFIG_HIGHMEM | ||
242 | |||
243 | /* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */ | ||
244 | extern unsigned long pbase_map[]; | ||
245 | extern void *vbase_map[]; | ||
246 | |||
247 | static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr) | ||
248 | { | ||
249 | unsigned long kaddr = (unsigned long)_kaddr; | ||
250 | return pbase_map[kaddr >> HPAGE_SHIFT] + | ||
251 | ((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT); | ||
252 | } | ||
253 | |||
254 | static inline void *pfn_to_kaddr(unsigned long pfn) | ||
255 | { | ||
256 | return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT); | ||
257 | } | ||
258 | |||
259 | static inline phys_addr_t virt_to_phys(const volatile void *kaddr) | ||
260 | { | ||
261 | unsigned long pfn = kaddr_to_pfn(kaddr); | ||
262 | return ((phys_addr_t)pfn << PAGE_SHIFT) + | ||
263 | ((unsigned long)kaddr & (PAGE_SIZE-1)); | ||
264 | } | ||
265 | |||
266 | static inline void *phys_to_virt(phys_addr_t paddr) | ||
267 | { | ||
268 | return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1)); | ||
269 | } | ||
270 | |||
271 | /* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */ | ||
272 | static inline int virt_addr_valid(const volatile void *kaddr) | ||
273 | { | ||
274 | extern void *high_memory; /* copied from <linux/mm.h> */ | ||
275 | return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory); | ||
276 | } | ||
277 | |||
278 | #else /* !CONFIG_HIGHMEM */ | ||
279 | |||
280 | static inline unsigned long kaddr_to_pfn(const volatile void *kaddr) | ||
281 | { | ||
282 | return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT; | ||
283 | } | ||
284 | |||
285 | static inline void *pfn_to_kaddr(unsigned long pfn) | ||
286 | { | ||
287 | return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET); | ||
288 | } | ||
289 | |||
290 | static inline phys_addr_t virt_to_phys(const volatile void *kaddr) | ||
291 | { | ||
292 | return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET); | ||
293 | } | ||
294 | |||
295 | static inline void *phys_to_virt(phys_addr_t paddr) | ||
296 | { | ||
297 | return (void *)((unsigned long)paddr + PAGE_OFFSET); | ||
298 | } | ||
299 | |||
300 | /* Check that the given address is within some mapped range of PAs. */ | ||
301 | #define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr)) | ||
302 | |||
303 | #endif /* !CONFIG_HIGHMEM */ | ||
304 | |||
305 | /* Not all callers are consistent in how they call these functions. */ | ||
306 | #define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr)) | ||
307 | #define __va(paddr) phys_to_virt((phys_addr_t)(paddr)) | ||
308 | |||
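In the !CONFIG_HIGHMEM case above, these translations reduce to adding or subtracting a constant PAGE_OFFSET. A minimal standalone sketch of that round trip, using the 0xC0000000 offset mentioned in the 32-bit layout comment purely as an example value:

    #include <stdio.h>

    #define DEMO_PAGE_OFFSET 0xc0000000UL   /* example value only */

    static unsigned long demo_pa(const void *kaddr)
    {
            return (unsigned long)kaddr - DEMO_PAGE_OFFSET;
    }

    static void *demo_va(unsigned long paddr)
    {
            return (void *)(paddr + DEMO_PAGE_OFFSET);
    }

    int main(void)
    {
            void *kaddr = (void *)0xc1234000UL;
            unsigned long pa = demo_pa(kaddr);

            printf("kaddr %p -> pa 0x%lx -> kaddr %p\n", kaddr, pa, demo_va(pa));
            return 0;
    }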
309 | extern int devmem_is_allowed(unsigned long pagenr); | ||
310 | |||
311 | #ifdef CONFIG_FLATMEM | ||
312 | static inline int pfn_valid(unsigned long pfn) | ||
313 | { | ||
314 | return pfn < max_mapnr; | ||
315 | } | ||
316 | #endif | ||
317 | |||
318 | /* Provide as macros since these require some other headers included. */ | ||
319 | #define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT) | ||
320 | #define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn(kaddr)) | ||
321 | #define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page)) | ||
322 | |||
323 | struct mm_struct; | ||
324 | extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); | ||
325 | |||
326 | #endif /* !__ASSEMBLY__ */ | ||
327 | |||
328 | #define VM_DATA_DEFAULT_FLAGS \ | ||
329 | (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
330 | |||
331 | #include <asm-generic/memory_model.h> | ||
332 | #include <asm-generic/getorder.h> | ||
333 | |||
334 | #endif /* _ASM_TILE_PAGE_H */ | ||
diff --git a/arch/tile/include/asm/param.h b/arch/tile/include/asm/param.h new file mode 100644 index 000000000000..965d45427975 --- /dev/null +++ b/arch/tile/include/asm/param.h | |||
@@ -0,0 +1 @@ | |||
1 | #include <asm-generic/param.h> | ||
diff --git a/arch/tile/include/asm/pci-bridge.h b/arch/tile/include/asm/pci-bridge.h new file mode 100644 index 000000000000..e853b0e2793b --- /dev/null +++ b/arch/tile/include/asm/pci-bridge.h | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PCI_BRIDGE_H | ||
16 | #define _ASM_TILE_PCI_BRIDGE_H | ||
17 | |||
18 | #include <linux/ioport.h> | ||
19 | #include <linux/pci.h> | ||
20 | |||
21 | struct device_node; | ||
22 | struct pci_controller; | ||
23 | |||
24 | /* | ||
25 | * pci_bus_io_base() returns the memory address at which you can access | ||
26 | * the I/O space for PCI bus number `bus' (or NULL on error). | ||
27 | */ | ||
28 | extern void __iomem *pci_bus_io_base(unsigned int bus); | ||
29 | extern unsigned long pci_bus_io_base_phys(unsigned int bus); | ||
30 | extern unsigned long pci_bus_mem_base_phys(unsigned int bus); | ||
31 | |||
32 | /* Allocate a new PCI host bridge structure */ | ||
33 | extern struct pci_controller *pcibios_alloc_controller(void); | ||
34 | |||
35 | /* Helper function for setting up resources */ | ||
36 | extern void pci_init_resource(struct resource *res, unsigned long start, | ||
37 | unsigned long end, int flags, char *name); | ||
38 | |||
39 | /* Get the PCI host controller for a bus */ | ||
40 | extern struct pci_controller *pci_bus_to_hose(int bus); | ||
41 | |||
42 | /* | ||
43 | * Structure of a PCI controller (host bridge) | ||
44 | */ | ||
45 | struct pci_controller { | ||
46 | int index; /* PCI domain number */ | ||
47 | struct pci_bus *root_bus; | ||
48 | |||
49 | int first_busno; | ||
50 | int last_busno; | ||
51 | |||
52 | int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */ | ||
53 | int hv_mem_fd; /* fd to Hypervisor for MMIO operations */ | ||
54 | |||
55 | struct pci_ops *ops; | ||
56 | |||
57 | int irq_base; /* Base IRQ from the Hypervisor */ | ||
58 | int plx_gen1; /* flag for PLX Gen 1 configuration */ | ||
59 | |||
60 | /* Address ranges that are routed to this controller/bridge. */ | ||
61 | struct resource mem_resources[3]; | ||
62 | }; | ||
63 | |||
64 | static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus) | ||
65 | { | ||
66 | return bus->sysdata; | ||
67 | } | ||
68 | |||
69 | extern void setup_indirect_pci_nomap(struct pci_controller *hose, | ||
70 | void __iomem *cfg_addr, void __iomem *cfg_data); | ||
71 | extern void setup_indirect_pci(struct pci_controller *hose, | ||
72 | u32 cfg_addr, u32 cfg_data); | ||
73 | extern void setup_grackle(struct pci_controller *hose); | ||
74 | |||
75 | extern unsigned char common_swizzle(struct pci_dev *, unsigned char *); | ||
76 | |||
77 | /* | ||
78 | * The following code swizzles for exactly one bridge. The routine | ||
79 | * common_swizzle above handles multiple bridges. But there are | ||
80 | * some boards that don't follow the PCI spec's suggestion, so we | ||
81 | * break this piece out separately. | ||
82 | */ | ||
83 | static inline unsigned char bridge_swizzle(unsigned char pin, | ||
84 | unsigned char idsel) | ||
85 | { | ||
86 | return (((pin-1) + idsel) % 4) + 1; | ||
87 | } | ||
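A quick worked example of the formula above: for a device at idsel 3, INTA (pin 1) is routed to the parent bridge's pin 4, INTB to pin 1, and so on. A standalone check of the arithmetic:

    #include <stdio.h>

    static unsigned char demo_bridge_swizzle(unsigned char pin, unsigned char idsel)
    {
            return (((pin - 1) + idsel) % 4) + 1;
    }

    int main(void)
    {
            unsigned char pin;

            for (pin = 1; pin <= 4; pin++)
                    printf("idsel 3, pin INT%c -> parent pin INT%c\n",
                           'A' + pin - 1,
                           'A' + demo_bridge_swizzle(pin, 3) - 1);
            return 0;
    }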
88 | |||
89 | /* | ||
90 | * The following macro is used to look up irqs in a standard table | ||
91 | * format for those PPC systems that do not already have PCI | ||
92 | * interrupts properly routed. | ||
93 | */ | ||
94 | /* FIXME - double check this */ | ||
95 | #define PCI_IRQ_TABLE_LOOKUP ({ \ | ||
96 | long _ctl_ = -1; \ | ||
97 | if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \ | ||
98 | _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \ | ||
99 | _ctl_; \ | ||
100 | }) | ||
101 | |||
102 | /* | ||
103 | * Scan the buses below a given PCI host bridge and assign suitable | ||
104 | * resources to all devices found. | ||
105 | */ | ||
106 | extern int pciauto_bus_scan(struct pci_controller *, int); | ||
107 | |||
108 | #ifdef CONFIG_PCI | ||
109 | extern unsigned long pci_address_to_pio(phys_addr_t address); | ||
110 | #else | ||
111 | static inline unsigned long pci_address_to_pio(phys_addr_t address) | ||
112 | { | ||
113 | return (unsigned long)-1; | ||
114 | } | ||
115 | #endif | ||
116 | |||
117 | #endif /* _ASM_TILE_PCI_BRIDGE_H */ | ||
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h new file mode 100644 index 000000000000..b0c15da2d5d5 --- /dev/null +++ b/arch/tile/include/asm/pci.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PCI_H | ||
16 | #define _ASM_TILE_PCI_H | ||
17 | |||
18 | #include <asm/pci-bridge.h> | ||
19 | |||
20 | /* | ||
21 | * The hypervisor maps the entirety of CPA-space as bus addresses, so | ||
22 | * bus addresses are physical addresses. The networking and block | ||
23 | * device layers use this boolean for bounce buffer decisions. | ||
24 | */ | ||
25 | #define PCI_DMA_BUS_IS_PHYS 1 | ||
26 | |||
27 | struct pci_controller *pci_bus_to_hose(int bus); | ||
28 | unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp); | ||
29 | int __init tile_pci_init(void); | ||
30 | void pci_iounmap(struct pci_dev *dev, void __iomem *addr); | ||
31 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); | ||
32 | void __devinit pcibios_fixup_bus(struct pci_bus *bus); | ||
33 | |||
34 | int __devinit _tile_cfg_read(struct pci_controller *hose, | ||
35 | int bus, | ||
36 | int slot, | ||
37 | int function, | ||
38 | int offset, | ||
39 | int size, | ||
40 | u32 *val); | ||
41 | int __devinit _tile_cfg_write(struct pci_controller *hose, | ||
42 | int bus, | ||
43 | int slot, | ||
44 | int function, | ||
45 | int offset, | ||
46 | int size, | ||
47 | u32 val); | ||
48 | |||
49 | /* | ||
50 | * These are used to do config reads and writes in the early stages of | ||
51 | * setup before the driver infrastructure has been set up enough to be | ||
52 | * able to do config reads and writes. | ||
53 | */ | ||
54 | #define early_cfg_read(where, size, value) \ | ||
55 | _tile_cfg_read(controller, \ | ||
56 | current_bus, \ | ||
57 | pci_slot, \ | ||
58 | pci_fn, \ | ||
59 | where, \ | ||
60 | size, \ | ||
61 | value) | ||
62 | |||
63 | #define early_cfg_write(where, size, value) \ | ||
64 | _tile_cfg_write(controller, \ | ||
65 | current_bus, \ | ||
66 | pci_slot, \ | ||
67 | pci_fn, \ | ||
68 | where, \ | ||
69 | size, \ | ||
70 | value) | ||
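Note that these macros expand against local variables named controller, current_bus, pci_slot and pci_fn, so a caller must already have those in scope. A standalone sketch of that pattern (the demo_* names and the fabricated config value are illustrative only, not the real _tile_cfg_read() behaviour):

    #include <stdio.h>

    struct demo_controller { int index; };

    static int demo_cfg_read(struct demo_controller *hose, int bus, int slot,
                             int fn, int where, int size, unsigned int *val)
    {
            /* stand-in for _tile_cfg_read(): fabricate a recognizable value */
            (void)hose; (void)where; (void)size;
            *val = (unsigned int)((bus << 16) | (slot << 8) | fn);
            return 0;
    }

    /* same shape as early_cfg_read(): expands against the caller's locals */
    #define demo_early_cfg_read(where, size, value) \
            demo_cfg_read(controller, current_bus, pci_slot, pci_fn, \
                          where, size, value)

    int main(void)
    {
            struct demo_controller ctl = { 0 };
            struct demo_controller *controller = &ctl;
            int current_bus = 0, pci_slot = 1, pci_fn = 0;
            unsigned int val;

            demo_early_cfg_read(0, 4, &val);
            printf("read config value 0x%x\n", val);
            return 0;
    }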
71 | |||
72 | |||
73 | |||
74 | #define PCICFG_BYTE 1 | ||
75 | #define PCICFG_WORD 2 | ||
76 | #define PCICFG_DWORD 4 | ||
77 | |||
78 | #define TILE_NUM_PCIE 2 | ||
79 | |||
80 | #define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index) | ||
81 | |||
82 | /* | ||
83 | * This decides whether to display the domain number in /proc. | ||
84 | */ | ||
85 | static inline int pci_proc_domain(struct pci_bus *bus) | ||
86 | { | ||
87 | return 1; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * I/O space is currently not supported. | ||
92 | */ | ||
93 | |||
94 | #define TILE_PCIE_LOWER_IO 0x0 | ||
95 | #define TILE_PCIE_UPPER_IO 0x10000 | ||
96 | #define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF | ||
97 | |||
98 | #define _PAGE_NO_CACHE 0 | ||
99 | #define _PAGE_GUARDED 0 | ||
100 | |||
101 | |||
102 | #define pcibios_assign_all_busses() pci_assign_all_buses | ||
103 | extern int pci_assign_all_buses; | ||
104 | |||
105 | static inline void pcibios_set_master(struct pci_dev *dev) | ||
106 | { | ||
107 | /* No special bus mastering setup handling */ | ||
108 | } | ||
109 | |||
110 | #define PCIBIOS_MIN_MEM 0 | ||
111 | #define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO | ||
112 | |||
113 | /* | ||
114 | * This flag indicates whether the platform is a TILEmpower board, | ||
115 | * which needs special configuration for the PLX switch chip. | ||
116 | */ | ||
117 | extern int blade_pci; | ||
118 | |||
119 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | ||
120 | #include <asm-generic/pci-dma-compat.h> | ||
121 | |||
122 | /* generic pci stuff */ | ||
123 | #include <asm-generic/pci.h> | ||
124 | |||
125 | /* Use any cpu for PCI. */ | ||
126 | #define cpumask_of_pcibus(bus) cpu_online_mask | ||
127 | |||
128 | #endif /* _ASM_TILE_PCI_H */ | ||
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h new file mode 100644 index 000000000000..63294f5a8efb --- /dev/null +++ b/arch/tile/include/asm/percpu.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PERCPU_H | ||
16 | #define _ASM_TILE_PERCPU_H | ||
17 | |||
18 | register unsigned long __my_cpu_offset __asm__("tp"); | ||
19 | #define __my_cpu_offset __my_cpu_offset | ||
20 | #define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) | ||
21 | |||
22 | #include <asm-generic/percpu.h> | ||
23 | |||
24 | #endif /* _ASM_TILE_PERCPU_H */ | ||
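Note for readers: asm-generic/percpu.h turns a per-cpu access into "base address plus this cpu's offset", and on TILE that offset simply lives in the "tp" register. A rough user-space model of the address arithmetic only (the layout, NR_CPUS and offsets here are made up for illustration):

#include <stdio.h>

#define NR_CPUS 4

/* Model: each cpu owns a private copy of the per-cpu data area. */
static long percpu_area[NR_CPUS][16];

/* Model of __my_cpu_offset: distance from cpu 0's area to this cpu's. */
static unsigned long cpu_offset(int cpu)
{
	return (unsigned long)&percpu_area[cpu][0]
	     - (unsigned long)&percpu_area[0][0];
}

/* Model of per_cpu_ptr(): the variable's base address plus the offset. */
#define per_cpu_ptr(ptr, cpu) \
	((typeof(ptr))((unsigned long)(ptr) + cpu_offset(cpu)))

int main(void)
{
	long *counter = &percpu_area[0][5];	/* the "per-cpu variable" */
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		*per_cpu_ptr(counter, cpu) = cpu * 100;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: counter = %ld\n", cpu, *per_cpu_ptr(counter, cpu));
	return 0;
}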
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h new file mode 100644 index 000000000000..cf52791a5501 --- /dev/null +++ b/arch/tile/include/asm/pgalloc.h | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PGALLOC_H | ||
16 | #define _ASM_TILE_PGALLOC_H | ||
17 | |||
18 | #include <linux/threads.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/mmzone.h> | ||
21 | #include <asm/fixmap.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | |||
24 | /* Bits for the size of the second-level page table. */ | ||
25 | #define L2_KERNEL_PGTABLE_SHIFT \ | ||
26 | (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE) | ||
27 | |||
28 | /* We currently allocate user L2 page tables by page (unlike kernel L2s). */ | ||
29 | #if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL | ||
30 | #define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL | ||
31 | #else | ||
32 | #define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT | ||
33 | #endif | ||
34 | |||
35 | /* How many pages do we need, as an "order", for a user L2 page table? */ | ||
36 | #define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL) | ||
37 | |||
38 | /* How big is a kernel L2 page table? */ | ||
39 | #define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT) | ||
40 | |||
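Plugging in the 32-bit geometry implied elsewhere in this patch (64KB small pages, 16MB huge pages, 8-byte PTEs; the log2 values below are assumptions used only for this worked example), the macros above give 2KB kernel L2 tables and page-sized (order-0) user L2 tables:

#include <stdio.h>

/* Assumed hypervisor constants, normally from <hv/hypervisor.h>. */
#define HV_LOG2_PAGE_SIZE_SMALL	16	/* 64KB pages */
#define HV_LOG2_PAGE_SIZE_LARGE	24	/* 16MB huge pages */
#define HV_LOG2_PTE_SIZE	3	/* 8-byte PTEs */

#define L2_KERNEL_PGTABLE_SHIFT \
	(HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE)

#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL
#define L2_USER_PGTABLE_SHIFT	HV_LOG2_PAGE_SIZE_SMALL
#else
#define L2_USER_PGTABLE_SHIFT	L2_KERNEL_PGTABLE_SHIFT
#endif

#define L2_USER_PGTABLE_ORDER	(L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL)

int main(void)
{
	/* 24 - 16 + 3 = 11, so a kernel L2 table is 2KB: far less than a page. */
	printf("kernel L2 table: %d bytes\n", 1 << L2_KERNEL_PGTABLE_SHIFT);
	/* User L2 tables are rounded up to a whole page, hence order 0. */
	printf("user L2 table:   %d bytes (order %d)\n",
	       1 << L2_USER_PGTABLE_SHIFT, L2_USER_PGTABLE_ORDER);
	return 0;
}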
41 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) | ||
42 | { | ||
43 | #ifdef CONFIG_64BIT | ||
44 | set_pte_order(pmdp, pmd, L2_USER_PGTABLE_ORDER); | ||
45 | #else | ||
46 | set_pte_order(&pmdp->pud.pgd, pmd.pud.pgd, L2_USER_PGTABLE_ORDER); | ||
47 | #endif | ||
48 | } | ||
49 | |||
50 | static inline void pmd_populate_kernel(struct mm_struct *mm, | ||
51 | pmd_t *pmd, pte_t *ptep) | ||
52 | { | ||
53 | set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN, | ||
54 | __pgprot(_PAGE_PRESENT))); | ||
55 | } | ||
56 | |||
57 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | ||
58 | pgtable_t page) | ||
59 | { | ||
60 | set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)), | ||
61 | __pgprot(_PAGE_PRESENT))); | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * Allocate and free page tables. | ||
66 | */ | ||
67 | |||
68 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | ||
69 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | ||
70 | |||
71 | extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); | ||
72 | extern void pte_free(struct mm_struct *mm, struct page *pte); | ||
73 | |||
74 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
75 | |||
76 | static inline pte_t * | ||
77 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | ||
78 | { | ||
79 | return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address))); | ||
80 | } | ||
81 | |||
82 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
83 | { | ||
84 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | ||
85 | pte_free(mm, virt_to_page(pte)); | ||
86 | } | ||
87 | |||
88 | extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, | ||
89 | unsigned long address); | ||
90 | |||
91 | #define check_pgt_cache() do { } while (0) | ||
92 | |||
93 | /* | ||
94 | * Get the small-page pte_t lowmem entry for a given pfn. | ||
95 | * This may or may not be in use, depending on whether the initial | ||
96 | * huge-page entry for the page has already been shattered. | ||
97 | */ | ||
98 | pte_t *get_prealloc_pte(unsigned long pfn); | ||
99 | |||
100 | /* During init, we can shatter kernel huge pages if needed. */ | ||
101 | void shatter_pmd(pmd_t *pmd); | ||
102 | |||
103 | #ifdef __tilegx__ | ||
104 | /* We share a single page allocator for both L1 and L2 page tables. */ | ||
105 | #if HV_L1_SIZE != HV_L2_SIZE | ||
106 | # error Rework assumption that L1 and L2 page tables are same size. | ||
107 | #endif | ||
108 | #define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER | ||
109 | #define pud_populate(mm, pud, pmd) \ | ||
110 | pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd)) | ||
111 | #define pmd_alloc_one(mm, addr) \ | ||
112 | ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr)))) | ||
113 | #define pmd_free(mm, pmdp) \ | ||
114 | pte_free((mm), virt_to_page(pmdp)) | ||
115 | #define __pmd_free_tlb(tlb, pmdp, address) \ | ||
116 | __pte_free_tlb((tlb), virt_to_page(pmdp), (address)) | ||
117 | #endif | ||
118 | |||
119 | #endif /* _ASM_TILE_PGALLOC_H */ | ||
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h new file mode 100644 index 000000000000..beb1504e9c10 --- /dev/null +++ b/arch/tile/include/asm/pgtable.h | |||
@@ -0,0 +1,475 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This file contains the functions and defines necessary to modify and use | ||
15 | * the TILE page table tree. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_TILE_PGTABLE_H | ||
19 | #define _ASM_TILE_PGTABLE_H | ||
20 | |||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | #ifndef __ASSEMBLY__ | ||
24 | |||
25 | #include <linux/bitops.h> | ||
26 | #include <linux/threads.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/list.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <asm/processor.h> | ||
31 | #include <asm/fixmap.h> | ||
32 | #include <asm/system.h> | ||
33 | |||
34 | struct mm_struct; | ||
35 | struct vm_area_struct; | ||
36 | |||
37 | /* | ||
38 | * ZERO_PAGE is a global shared page that is always zero: used | ||
39 | * for zero-mapped memory areas etc.. | ||
40 | */ | ||
41 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | ||
42 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
43 | |||
44 | extern pgd_t swapper_pg_dir[]; | ||
45 | extern pgprot_t swapper_pgprot; | ||
46 | extern struct kmem_cache *pgd_cache; | ||
47 | extern spinlock_t pgd_lock; | ||
48 | extern struct list_head pgd_list; | ||
49 | |||
50 | /* | ||
51 | * The very last slots in the pgd_t are for addresses unusable by Linux | ||
52 | * (pgd_addr_invalid() returns true). So we use them for the list structure. | ||
53 | * The x86 code we are modelled on uses the page->private/index fields | ||
54 | * (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since | ||
55 | * our pgds are so much smaller than a page, it seems a waste to | ||
56 | * spend a whole page on each pgd. | ||
57 | */ | ||
58 | #define PGD_LIST_OFFSET \ | ||
59 | ((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head)) | ||
60 | #define pgd_to_list(pgd) \ | ||
61 | ((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET)) | ||
62 | #define list_to_pgd(list) \ | ||
63 | ((pgd_t *)((char *)(list) - PGD_LIST_OFFSET)) | ||
64 | |||
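The list node is simply overlaid on the last, unusable pgd slots, so pgd_to_list() and list_to_pgd() are plain offset arithmetic and exact inverses. A quick user-space sketch of the same overlay (PTRS_PER_PGD and the types are stand-ins, not the real definitions):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
typedef struct { unsigned long long val; } pgd_t;

#define PTRS_PER_PGD	256	/* stand-in value for illustration */

#define PGD_LIST_OFFSET \
	((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head))
#define pgd_to_list(pgd) \
	((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET))
#define list_to_pgd(list) \
	((pgd_t *)((char *)(list) - PGD_LIST_OFFSET))

int main(void)
{
	static pgd_t pgd[PTRS_PER_PGD];
	struct list_head *node = pgd_to_list(pgd);

	/* The node sits in the tail of the pgd, and the mapping round-trips. */
	printf("list node offset: %ld bytes\n", (long)((char *)node - (char *)pgd));
	printf("round trip ok:    %d\n", list_to_pgd(node) == pgd);
	return 0;
}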
65 | extern void pgtable_cache_init(void); | ||
66 | extern void paging_init(void); | ||
67 | extern void set_page_homes(void); | ||
68 | |||
69 | #define FIRST_USER_ADDRESS 0 | ||
70 | |||
71 | #define _PAGE_PRESENT HV_PTE_PRESENT | ||
72 | #define _PAGE_HUGE_PAGE HV_PTE_PAGE | ||
73 | #define _PAGE_READABLE HV_PTE_READABLE | ||
74 | #define _PAGE_WRITABLE HV_PTE_WRITABLE | ||
75 | #define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE | ||
76 | #define _PAGE_ACCESSED HV_PTE_ACCESSED | ||
77 | #define _PAGE_DIRTY HV_PTE_DIRTY | ||
78 | #define _PAGE_GLOBAL HV_PTE_GLOBAL | ||
79 | #define _PAGE_USER HV_PTE_USER | ||
80 | |||
81 | /* | ||
82 | * All the "standard" bits. Cache-control bits are managed elsewhere. | ||
83 | * This is used to test for valid level-2 page table pointers by checking | ||
84 | * all the bits, and to mask away the cache control bits for mprotect. | ||
85 | */ | ||
86 | #define _PAGE_ALL (\ | ||
87 | _PAGE_PRESENT | \ | ||
88 | _PAGE_HUGE_PAGE | \ | ||
89 | _PAGE_READABLE | \ | ||
90 | _PAGE_WRITABLE | \ | ||
91 | _PAGE_EXECUTABLE | \ | ||
92 | _PAGE_ACCESSED | \ | ||
93 | _PAGE_DIRTY | \ | ||
94 | _PAGE_GLOBAL | \ | ||
95 | _PAGE_USER \ | ||
96 | ) | ||
97 | |||
98 | #define PAGE_NONE \ | ||
99 | __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) | ||
100 | #define PAGE_SHARED \ | ||
101 | __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \ | ||
102 | _PAGE_USER | _PAGE_ACCESSED) | ||
103 | |||
104 | #define PAGE_SHARED_EXEC \ | ||
105 | __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \ | ||
106 | _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED) | ||
107 | #define PAGE_COPY_NOEXEC \ | ||
108 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE) | ||
109 | #define PAGE_COPY_EXEC \ | ||
110 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \ | ||
111 | _PAGE_READABLE | _PAGE_EXECUTABLE) | ||
112 | #define PAGE_COPY \ | ||
113 | PAGE_COPY_NOEXEC | ||
114 | #define PAGE_READONLY \ | ||
115 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE) | ||
116 | #define PAGE_READONLY_EXEC \ | ||
117 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \ | ||
118 | _PAGE_READABLE | _PAGE_EXECUTABLE) | ||
119 | |||
120 | #define _PAGE_KERNEL_RO \ | ||
121 | (_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED) | ||
122 | #define _PAGE_KERNEL \ | ||
123 | (_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY) | ||
124 | #define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE) | ||
125 | |||
126 | #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) | ||
127 | #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO) | ||
128 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) | ||
129 | |||
130 | #define page_to_kpgprot(p) PAGE_KERNEL | ||
131 | |||
132 | /* | ||
133 | * We could tighten these up, but for now writable or executable | ||
134 | * implies readable. | ||
135 | */ | ||
136 | #define __P000 PAGE_NONE | ||
137 | #define __P001 PAGE_READONLY | ||
138 | #define __P010 PAGE_COPY /* this is write-only, which we won't support */ | ||
139 | #define __P011 PAGE_COPY | ||
140 | #define __P100 PAGE_READONLY_EXEC | ||
141 | #define __P101 PAGE_READONLY_EXEC | ||
142 | #define __P110 PAGE_COPY_EXEC | ||
143 | #define __P111 PAGE_COPY_EXEC | ||
144 | |||
145 | #define __S000 PAGE_NONE | ||
146 | #define __S001 PAGE_READONLY | ||
147 | #define __S010 PAGE_SHARED | ||
148 | #define __S011 PAGE_SHARED | ||
149 | #define __S100 PAGE_READONLY_EXEC | ||
150 | #define __S101 PAGE_READONLY_EXEC | ||
151 | #define __S110 PAGE_SHARED_EXEC | ||
152 | #define __S111 PAGE_SHARED_EXEC | ||
153 | |||
154 | /* | ||
155 | * All the normal _PAGE_ALL bits are ignored for PMDs, except _PAGE_PRESENT | ||
156 | * and _PAGE_HUGE_PAGE, which must be one and zero, respectively. | ||
157 | * We set the ignored bits to zero. | ||
158 | */ | ||
159 | #define _PAGE_TABLE _PAGE_PRESENT | ||
160 | |||
161 | /* Inherit the caching flags from the old protection bits. */ | ||
162 | #define pgprot_modify(oldprot, newprot) \ | ||
163 | (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val } | ||
164 | |||
165 | /* Just setting the PFN to zero suffices. */ | ||
166 | #define pte_pgprot(x) hv_pte_set_pfn((x), 0) | ||
167 | |||
168 | /* | ||
169 | * For PTEs and PDEs, we must clear the Present bit first when | ||
170 | * clearing a page table entry, so clear the bottom half first and | ||
171 | * enforce ordering with a barrier. | ||
172 | */ | ||
173 | static inline void __pte_clear(pte_t *ptep) | ||
174 | { | ||
175 | #ifdef __tilegx__ | ||
176 | ptep->val = 0; | ||
177 | #else | ||
178 | u32 *tmp = (u32 *)ptep; | ||
179 | tmp[0] = 0; | ||
180 | barrier(); | ||
181 | tmp[1] = 0; | ||
182 | #endif | ||
183 | } | ||
184 | #define pte_clear(mm, addr, ptep) __pte_clear(ptep) | ||
185 | |||
186 | /* | ||
187 | * The following only work if pte_present() is true. | ||
188 | * Undefined behaviour if not. | ||
189 | */ | ||
190 | #define pte_present hv_pte_get_present | ||
191 | #define pte_user hv_pte_get_user | ||
192 | #define pte_read hv_pte_get_readable | ||
193 | #define pte_dirty hv_pte_get_dirty | ||
194 | #define pte_young hv_pte_get_accessed | ||
195 | #define pte_write hv_pte_get_writable | ||
196 | #define pte_exec hv_pte_get_executable | ||
197 | #define pte_huge hv_pte_get_page | ||
198 | #define pte_rdprotect hv_pte_clear_readable | ||
199 | #define pte_exprotect hv_pte_clear_executable | ||
200 | #define pte_mkclean hv_pte_clear_dirty | ||
201 | #define pte_mkold hv_pte_clear_accessed | ||
202 | #define pte_wrprotect hv_pte_clear_writable | ||
203 | #define pte_mksmall hv_pte_clear_page | ||
204 | #define pte_mkread hv_pte_set_readable | ||
205 | #define pte_mkexec hv_pte_set_executable | ||
206 | #define pte_mkdirty hv_pte_set_dirty | ||
207 | #define pte_mkyoung hv_pte_set_accessed | ||
208 | #define pte_mkwrite hv_pte_set_writable | ||
209 | #define pte_mkhuge hv_pte_set_page | ||
210 | |||
211 | #define pte_special(pte) 0 | ||
212 | #define pte_mkspecial(pte) (pte) | ||
213 | |||
214 | /* | ||
215 | * Use some spare bits in the PTE for user-caching tags. | ||
216 | */ | ||
217 | #define pte_set_forcecache hv_pte_set_client0 | ||
218 | #define pte_get_forcecache hv_pte_get_client0 | ||
219 | #define pte_clear_forcecache hv_pte_clear_client0 | ||
220 | #define pte_set_anyhome hv_pte_set_client1 | ||
221 | #define pte_get_anyhome hv_pte_get_client1 | ||
222 | #define pte_clear_anyhome hv_pte_clear_client1 | ||
223 | |||
224 | /* | ||
225 | * A migrating PTE has PAGE_PRESENT clear but all the other bits preserved. | ||
226 | */ | ||
227 | #define pte_migrating hv_pte_get_migrating | ||
228 | #define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x)) | ||
229 | #define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x)) | ||
230 | |||
231 | #define pte_ERROR(e) \ | ||
232 | printk("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e)) | ||
233 | #define pgd_ERROR(e) \ | ||
234 | printk("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
235 | |||
236 | /* | ||
237 | * set_pte_order() sets the given PTE and also sanity-checks the | ||
238 | * requested PTE against the page homecaching. Unspecified parts | ||
239 | * of the PTE are filled in when it is written to memory, i.e. all | ||
240 | * caching attributes if "!forcecache", or the home cpu if "anyhome". | ||
241 | */ | ||
242 | extern void set_pte_order(pte_t *ptep, pte_t pte, int order); | ||
243 | |||
244 | #define set_pte(ptep, pteval) set_pte_order(ptep, pteval, 0) | ||
245 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) | ||
246 | #define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval) | ||
247 | |||
248 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
249 | |||
250 | static inline int pte_none(pte_t pte) | ||
251 | { | ||
252 | return !pte.val; | ||
253 | } | ||
254 | |||
255 | static inline unsigned long pte_pfn(pte_t pte) | ||
256 | { | ||
257 | return hv_pte_get_pfn(pte); | ||
258 | } | ||
259 | |||
260 | /* Set or get the remote cache cpu in a pgprot with remote caching. */ | ||
261 | extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu); | ||
262 | extern int get_remote_cache_cpu(pgprot_t prot); | ||
263 | |||
264 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) | ||
265 | { | ||
266 | return hv_pte_set_pfn(prot, pfn); | ||
267 | } | ||
268 | |||
269 | /* Support for priority mappings. */ | ||
270 | extern void start_mm_caching(struct mm_struct *mm); | ||
271 | extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next); | ||
272 | |||
273 | /* | ||
274 | * Support non-linear file mappings (see sys_remap_file_pages). | ||
275 | * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the | ||
276 | * file offset in the 32 high bits. | ||
277 | */ | ||
278 | #define _PAGE_FILE HV_PTE_CLIENT1 | ||
279 | #define PTE_FILE_MAX_BITS 32 | ||
280 | #define pte_file(pte) (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte)) | ||
281 | #define pte_to_pgoff(pte) ((pte).val >> 32) | ||
282 | #define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE }) | ||
283 | |||
284 | /* | ||
285 | * Encode and de-code a swap entry (see <linux/swapops.h>). | ||
286 | * We put the swap file type+offset in the 32 high bits; | ||
287 | * I believe we can just leave the low bits clear. | ||
288 | */ | ||
289 | #define __swp_type(swp) ((swp).val & 0x1f) | ||
290 | #define __swp_offset(swp) ((swp).val >> 5) | ||
291 | #define __swp_entry(type, off) ((swp_entry_t) { (type) | ((off) << 5) }) | ||
292 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 }) | ||
293 | #define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) }) | ||
294 | |||
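This packs a 5-bit swap type and the swap offset into the high 32 bits of the PTE, leaving the low bits (including the Present bit) clear. A small encode/decode sketch using the same shifts (the swp_entry_t/pte_t structs here are simplified stand-ins):

#include <stdio.h>

typedef struct { unsigned long long val; } swp_entry_t;
typedef struct { unsigned long long val; } pte_t;

#define __swp_type(swp)		((swp).val & 0x1f)
#define __swp_offset(swp)	((swp).val >> 5)
#define __swp_entry(type, off)	((swp_entry_t) { (type) | ((off) << 5) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).val >> 32 })
#define __swp_entry_to_pte(swp)	((pte_t) { (((long long)((swp).val)) << 32) })

int main(void)
{
	/* Encode swap type 3, page offset 0x1234, into a PTE and back. */
	swp_entry_t e = __swp_entry(3ULL, 0x1234ULL);
	pte_t pte = __swp_entry_to_pte(e);
	swp_entry_t back = __pte_to_swp_entry(pte);

	printf("pte    = 0x%016llx\n", pte.val);		/* low 32 bits stay clear */
	printf("type   = %llu\n", __swp_type(back));		/* 3 */
	printf("offset = 0x%llx\n", __swp_offset(back));	/* 0x1234 */
	return 0;
}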
295 | /* | ||
296 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | ||
297 | * | ||
298 | * dst - pointer to pgd range anywhere on a pgd page | ||
299 | * src - "" | ||
300 | * count - the number of pgds to copy. | ||
301 | * | ||
302 | * dst and src can be on the same page, but the range must not overlap, | ||
303 | * and must not cross a page boundary. | ||
304 | */ | ||
305 | static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | ||
306 | { | ||
307 | memcpy(dst, src, count * sizeof(pgd_t)); | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Conversion functions: convert a page and protection to a page entry, | ||
312 | * and a page entry and page directory to the page they refer to. | ||
313 | */ | ||
314 | |||
315 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
316 | |||
317 | /* | ||
318 | * If we are doing an mprotect(), just accept the new vma->vm_page_prot | ||
319 | * value and combine it with the PFN from the old PTE to get a new PTE. | ||
320 | */ | ||
321 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
322 | { | ||
323 | return pfn_pte(hv_pte_get_pfn(pte), newprot); | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD] | ||
328 | * | ||
329 | * This macro returns the index of the entry in the pgd page which would | ||
330 | * control the given virtual address. | ||
331 | */ | ||
332 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) | ||
333 | |||
334 | /* | ||
335 | * pgd_offset() returns a (pgd_t *) | ||
336 | * pgd_index() is used to get the offset into the pgd page's array of pgd_t's. | ||
337 | */ | ||
338 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | ||
339 | |||
340 | /* | ||
341 | * A shortcut which implies the use of the kernel's pgd, instead | ||
342 | * of a process's. | ||
343 | */ | ||
344 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | ||
345 | |||
346 | #if defined(CONFIG_HIGHPTE) | ||
347 | extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type); | ||
348 | #define pte_offset_map(dir, address) \ | ||
349 | _pte_offset_map(dir, address, KM_PTE0) | ||
350 | #define pte_offset_map_nested(dir, address) \ | ||
351 | _pte_offset_map(dir, address, KM_PTE1) | ||
352 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | ||
353 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | ||
354 | #else | ||
355 | #define pte_offset_map(dir, address) pte_offset_kernel(dir, address) | ||
356 | #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) | ||
357 | #define pte_unmap(pte) do { } while (0) | ||
358 | #define pte_unmap_nested(pte) do { } while (0) | ||
359 | #endif | ||
360 | |||
361 | /* Clear a non-executable kernel PTE and flush it from the TLB. */ | ||
362 | #define kpte_clear_flush(ptep, vaddr) \ | ||
363 | do { \ | ||
364 | pte_clear(&init_mm, (vaddr), (ptep)); \ | ||
365 | local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \ | ||
366 | } while (0) | ||
367 | |||
368 | /* | ||
369 | * The kernel page tables contain what we need, and we flush when we | ||
370 | * change specific page table entries. | ||
371 | */ | ||
372 | #define update_mmu_cache(vma, address, pte) do { } while (0) | ||
373 | |||
374 | #ifdef CONFIG_FLATMEM | ||
375 | #define kern_addr_valid(addr) (1) | ||
376 | #endif /* CONFIG_FLATMEM */ | ||
377 | |||
378 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
379 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
380 | |||
381 | extern void vmalloc_sync_all(void); | ||
382 | |||
383 | #endif /* !__ASSEMBLY__ */ | ||
384 | |||
385 | #ifdef __tilegx__ | ||
386 | #include <asm/pgtable_64.h> | ||
387 | #else | ||
388 | #include <asm/pgtable_32.h> | ||
389 | #endif | ||
390 | |||
391 | #ifndef __ASSEMBLY__ | ||
392 | |||
393 | static inline int pmd_none(pmd_t pmd) | ||
394 | { | ||
395 | /* | ||
396 | * Only check low word on 32-bit platforms, since it might be | ||
397 | * out of sync with upper half. | ||
398 | */ | ||
399 | return (unsigned long)pmd_val(pmd) == 0; | ||
400 | } | ||
401 | |||
402 | static inline int pmd_present(pmd_t pmd) | ||
403 | { | ||
404 | return pmd_val(pmd) & _PAGE_PRESENT; | ||
405 | } | ||
406 | |||
407 | static inline int pmd_bad(pmd_t pmd) | ||
408 | { | ||
409 | return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE); | ||
410 | } | ||
411 | |||
412 | static inline unsigned long pages_to_mb(unsigned long npg) | ||
413 | { | ||
414 | return npg >> (20 - PAGE_SHIFT); | ||
415 | } | ||
416 | |||
417 | /* | ||
418 | * The pmd can be thought of as an array like this: pmd_t[PTRS_PER_PMD] | ||
419 | * | ||
420 | * This function returns the index of the entry in the pmd which would | ||
421 | * control the given virtual address. | ||
422 | */ | ||
423 | static inline unsigned long pmd_index(unsigned long address) | ||
424 | { | ||
425 | return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); | ||
426 | } | ||
427 | |||
428 | /* | ||
429 | * A given kernel pmd_t maps to a specific virtual address (either a | ||
430 | * kernel huge page or a kernel pte_t table). Since kernel pte_t | ||
431 | * tables can be aligned at sub-page granularity, this function can | ||
432 | * return non-page-aligned pointers, despite its name. | ||
433 | */ | ||
434 | static inline unsigned long pmd_page_vaddr(pmd_t pmd) | ||
435 | { | ||
436 | phys_addr_t pa = | ||
437 | (phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN; | ||
438 | return (unsigned long)__va(pa); | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * A pmd_t points to the base of a huge page or to a pte_t array. | ||
443 | * If a pte_t array, since we can have multiple per page, we don't | ||
444 | * have a one-to-one mapping of pmd_t's to pages. However, this is | ||
445 | * OK for pte_lockptr(), since we just end up with potentially one | ||
446 | * lock being used for several pte_t arrays. | ||
447 | */ | ||
448 | #define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd))) | ||
449 | |||
450 | /* | ||
451 | * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE] | ||
452 | * | ||
453 | * This macro returns the index of the entry in the pte page which would | ||
454 | * control the given virtual address. | ||
455 | */ | ||
456 | static inline unsigned long pte_index(unsigned long address) | ||
457 | { | ||
458 | return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); | ||
459 | } | ||
460 | |||
461 | static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) | ||
462 | { | ||
463 | return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); | ||
464 | } | ||
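Together with pgd_index() above and the pgtable_32.h constants further down, this splits a 32-bit virtual address into a top-level index, a second-level index, and a page offset. A worked example with assumed shifts (64KB pages, 16MB huge pages):

#include <stdio.h>

/* Assumed 32-bit geometry: 64KB small pages, 16MB huge pages. */
#define PAGE_SHIFT	16
#define PGDIR_SHIFT	24
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))
#define PTRS_PER_PTE	(1 << (PGDIR_SHIFT - PAGE_SHIFT))

#define pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

int main(void)
{
	unsigned long addr = 0x12345678UL;

	/* Top 8 bits pick the pgd slot, next 8 the pte slot, low 16 the offset. */
	printf("pgd_index   = 0x%02lx\n", (unsigned long)pgd_index(addr));	/* 0x12 */
	printf("pte_index   = 0x%02lx\n", (unsigned long)pte_index(addr));	/* 0x34 */
	printf("page offset = 0x%04lx\n", addr & ((1UL << PAGE_SHIFT) - 1));	/* 0x5678 */
	return 0;
}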
465 | |||
466 | static inline int pmd_huge_page(pmd_t pmd) | ||
467 | { | ||
468 | return pmd_val(pmd) & _PAGE_HUGE_PAGE; | ||
469 | } | ||
470 | |||
471 | #include <asm-generic/pgtable.h> | ||
472 | |||
473 | #endif /* !__ASSEMBLY__ */ | ||
474 | |||
475 | #endif /* _ASM_TILE_PGTABLE_H */ | ||
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h new file mode 100644 index 000000000000..b935fb2ad4f3 --- /dev/null +++ b/arch/tile/include/asm/pgtable_32.h | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_PGTABLE_32_H | ||
17 | #define _ASM_TILE_PGTABLE_32_H | ||
18 | |||
19 | /* | ||
20 | * The level-1 index is defined by the huge page size. A PGD is composed | ||
21 | * of PTRS_PER_PGD pgd_t's and is the top level of the page table. | ||
22 | */ | ||
23 | #define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE | ||
24 | #define PGDIR_SIZE HV_PAGE_SIZE_LARGE | ||
25 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
26 | #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) | ||
27 | |||
28 | /* | ||
29 | * The level-2 index is defined by the difference between the huge | ||
30 | * page size and the normal page size. A PTE is composed of | ||
31 | * PTRS_PER_PTE pte_t's and is the bottom level of the page table. | ||
32 | * Note that the hypervisor docs use PTE for what we call pte_t, so | ||
33 | * this nomenclature is somewhat confusing. | ||
34 | */ | ||
35 | #define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) | ||
36 | |||
37 | #ifndef __ASSEMBLY__ | ||
38 | |||
39 | /* | ||
40 | * Right now we initialize only a single pte table. It can be extended | ||
41 | * easily, subsequent pte tables have to be allocated in one physical | ||
42 | * chunk of RAM. | ||
43 | * | ||
44 | * HOWEVER, if we are using an allocation scheme with slop after the | ||
45 | * end of the page table (e.g. where our L2 page tables are 2KB but | ||
46 | * our pages are 64KB and we are allocating via the page allocator) | ||
47 | * we can't extend it easily. | ||
48 | */ | ||
49 | #define LAST_PKMAP PTRS_PER_PTE | ||
50 | |||
51 | #define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK) | ||
52 | |||
53 | #ifdef CONFIG_HIGHMEM | ||
54 | # define __VMAPPING_END (PKMAP_BASE & ~(HPAGE_SIZE-1)) | ||
55 | #else | ||
56 | # define __VMAPPING_END (FIXADDR_START & ~(HPAGE_SIZE-1)) | ||
57 | #endif | ||
58 | |||
59 | #ifdef CONFIG_HUGEVMAP | ||
60 | #define HUGE_VMAP_END __VMAPPING_END | ||
61 | #define HUGE_VMAP_BASE (HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE) | ||
62 | #define _VMALLOC_END HUGE_VMAP_BASE | ||
63 | #else | ||
64 | #define _VMALLOC_END __VMAPPING_END | ||
65 | #endif | ||
66 | |||
67 | /* | ||
68 | * Align the vmalloc area to an L2 page table, and leave a guard page | ||
69 | * at the beginning and end. The vmalloc code also puts in an internal | ||
70 | * guard page between each allocation. | ||
71 | */ | ||
72 | #define VMALLOC_END (_VMALLOC_END - PAGE_SIZE) | ||
73 | extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */; | ||
74 | #define _VMALLOC_START (_VMALLOC_END - VMALLOC_RESERVE) | ||
75 | #define VMALLOC_START (_VMALLOC_START + PAGE_SIZE) | ||
76 | |||
77 | /* This is the maximum possible amount of lowmem. */ | ||
78 | #define MAXMEM (_VMALLOC_START - PAGE_OFFSET) | ||
79 | |||
80 | /* We have no pmd or pud since we are strictly a two-level page table */ | ||
81 | #include <asm-generic/pgtable-nopmd.h> | ||
82 | |||
83 | /* We don't define any pgds for these addresses. */ | ||
84 | static inline int pgd_addr_invalid(unsigned long addr) | ||
85 | { | ||
86 | return addr >= MEM_HV_INTRPT; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Provide versions of these routines that can be used safely when | ||
91 | * the hypervisor may be asynchronously modifying dirty/accessed bits. | ||
92 | */ | ||
93 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
94 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
95 | |||
96 | extern int ptep_test_and_clear_young(struct vm_area_struct *, | ||
97 | unsigned long addr, pte_t *); | ||
98 | extern void ptep_set_wrprotect(struct mm_struct *, | ||
99 | unsigned long addr, pte_t *); | ||
100 | |||
101 | /* Create a pmd from a PTFN. */ | ||
102 | static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot) | ||
103 | { | ||
104 | return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } }; | ||
105 | } | ||
106 | |||
107 | /* Return the page-table frame number (ptfn) that a pmd_t points at. */ | ||
108 | #define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd) | ||
109 | |||
110 | static inline void pmd_clear(pmd_t *pmdp) | ||
111 | { | ||
112 | __pte_clear(&pmdp->pud.pgd); | ||
113 | } | ||
114 | |||
115 | #endif /* __ASSEMBLY__ */ | ||
116 | |||
117 | #endif /* _ASM_TILE_PGTABLE_32_H */ | ||
diff --git a/arch/tile/include/asm/poll.h b/arch/tile/include/asm/poll.h new file mode 100644 index 000000000000..c98509d3149e --- /dev/null +++ b/arch/tile/include/asm/poll.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/poll.h> | |||
diff --git a/arch/tile/include/asm/posix_types.h b/arch/tile/include/asm/posix_types.h new file mode 100644 index 000000000000..22cae6230ceb --- /dev/null +++ b/arch/tile/include/asm/posix_types.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/posix_types.h> | |||
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h new file mode 100644 index 000000000000..96c50d2c4c2b --- /dev/null +++ b/arch/tile/include/asm/processor.h | |||
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PROCESSOR_H | ||
16 | #define _ASM_TILE_PROCESSOR_H | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | |||
20 | /* | ||
21 | * NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one | ||
22 | * normally would, due to #include dependencies. | ||
23 | */ | ||
24 | #include <asm/ptrace.h> | ||
25 | #include <asm/percpu.h> | ||
26 | |||
27 | #include <arch/chip.h> | ||
28 | #include <arch/spr_def.h> | ||
29 | |||
30 | struct task_struct; | ||
31 | struct thread_struct; | ||
32 | struct list_head; | ||
33 | |||
34 | typedef struct { | ||
35 | unsigned long seg; | ||
36 | } mm_segment_t; | ||
37 | |||
38 | /* | ||
39 | * Default implementation of the macro that returns the current | ||
40 | * instruction pointer ("program counter"). | ||
41 | */ | ||
42 | void *current_text_addr(void); | ||
43 | |||
44 | #if CHIP_HAS_TILE_DMA() | ||
45 | /* Capture the state of a suspended DMA. */ | ||
46 | struct tile_dma_state { | ||
47 | int enabled; | ||
48 | unsigned long src; | ||
49 | unsigned long dest; | ||
50 | unsigned long strides; | ||
51 | unsigned long chunk_size; | ||
52 | unsigned long src_chunk; | ||
53 | unsigned long dest_chunk; | ||
54 | unsigned long byte; | ||
55 | unsigned long status; | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * A mask of the DMA status register for selecting only the 'running' | ||
60 | * and 'done' bits. | ||
61 | */ | ||
62 | #define DMA_STATUS_MASK \ | ||
63 | (SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK) | ||
64 | #endif | ||
65 | |||
66 | /* | ||
67 | * Track asynchronous TLB events (faults and access violations) | ||
68 | * that occur while we are in kernel mode from DMA or the SN processor. | ||
69 | */ | ||
70 | struct async_tlb { | ||
71 | short fault_num; /* original fault number; 0 if none */ | ||
72 | char is_fault; /* was it a fault (vs an access violation) */ | ||
73 | char is_write; /* for fault: was it caused by a write? */ | ||
74 | unsigned long address; /* what address faulted? */ | ||
75 | }; | ||
76 | |||
77 | |||
78 | struct thread_struct { | ||
79 | /* kernel stack pointer */ | ||
80 | unsigned long ksp; | ||
81 | /* kernel PC */ | ||
82 | unsigned long pc; | ||
83 | /* starting user stack pointer (for page migration) */ | ||
84 | unsigned long usp0; | ||
85 | /* pid of process that created this one */ | ||
86 | pid_t creator_pid; | ||
87 | #if CHIP_HAS_TILE_DMA() | ||
88 | /* DMA info for suspended threads (byte == 0 means no DMA state) */ | ||
89 | struct tile_dma_state tile_dma_state; | ||
90 | #endif | ||
91 | /* User EX_CONTEXT registers */ | ||
92 | unsigned long ex_context[2]; | ||
93 | /* User SYSTEM_SAVE registers */ | ||
94 | unsigned long system_save[4]; | ||
95 | /* User interrupt mask */ | ||
96 | unsigned long long interrupt_mask; | ||
97 | /* User interrupt-control 0 state */ | ||
98 | unsigned long intctrl_0; | ||
99 | #if CHIP_HAS_PROC_STATUS_SPR() | ||
100 | /* Any other miscellaneous processor state bits */ | ||
101 | unsigned long proc_status; | ||
102 | #endif | ||
103 | #if CHIP_HAS_TILE_DMA() | ||
104 | /* Async DMA TLB fault information */ | ||
105 | struct async_tlb dma_async_tlb; | ||
106 | #endif | ||
107 | #if CHIP_HAS_SN_PROC() | ||
108 | /* Was static network processor when we were switched out? */ | ||
109 | int sn_proc_running; | ||
110 | /* Async SNI TLB fault information */ | ||
111 | struct async_tlb sn_async_tlb; | ||
112 | #endif | ||
113 | }; | ||
114 | |||
115 | #endif /* !__ASSEMBLY__ */ | ||
116 | |||
117 | /* | ||
118 | * Start with "sp" this many bytes below the top of the kernel stack. | ||
119 | * This preserves the invariant that a called function may write to *sp. | ||
120 | */ | ||
121 | #define STACK_TOP_DELTA 8 | ||
122 | |||
123 | /* | ||
124 | * When entering the kernel via a fault, start with the top of the | ||
125 | * pt_regs structure this many bytes below the top of the page. | ||
126 | * This aligns the pt_regs structure optimally for cache-line access. | ||
127 | */ | ||
128 | #ifdef __tilegx__ | ||
129 | #define KSTK_PTREGS_GAP 48 | ||
130 | #else | ||
131 | #define KSTK_PTREGS_GAP 56 | ||
132 | #endif | ||
133 | |||
134 | #ifndef __ASSEMBLY__ | ||
135 | |||
136 | #ifdef __tilegx__ | ||
137 | #define TASK_SIZE_MAX (MEM_LOW_END + 1) | ||
138 | #else | ||
139 | #define TASK_SIZE_MAX PAGE_OFFSET | ||
140 | #endif | ||
141 | |||
142 | /* TASK_SIZE and related variables are always checked in "current" context. */ | ||
143 | #ifdef CONFIG_COMPAT | ||
144 | #define COMPAT_TASK_SIZE (1UL << 31) | ||
145 | #define TASK_SIZE ((current_thread_info()->status & TS_COMPAT) ?\ | ||
146 | COMPAT_TASK_SIZE : TASK_SIZE_MAX) | ||
147 | #else | ||
148 | #define TASK_SIZE TASK_SIZE_MAX | ||
149 | #endif | ||
150 | |||
151 | /* We provide a minimal "vdso" a la x86; just the sigreturn code for now. */ | ||
152 | #define VDSO_BASE (TASK_SIZE - PAGE_SIZE) | ||
153 | |||
154 | #define STACK_TOP VDSO_BASE | ||
155 | |||
156 | /* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */ | ||
157 | #define STACK_TOP_MAX TASK_SIZE_MAX | ||
158 | |||
159 | /* | ||
160 | * This decides where the kernel will search for a free chunk of vm | ||
161 | * space during mmap's, if it is using bottom-up mapping. | ||
162 | */ | ||
163 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | ||
164 | |||
165 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | ||
166 | |||
167 | #define INIT_THREAD { \ | ||
168 | .ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \ | ||
169 | .interrupt_mask = -1ULL \ | ||
170 | } | ||
171 | |||
172 | /* Kernel stack top for the task that first boots on this cpu. */ | ||
173 | DECLARE_PER_CPU(unsigned long, boot_sp); | ||
174 | |||
175 | /* PC to boot from on this cpu. */ | ||
176 | DECLARE_PER_CPU(unsigned long, boot_pc); | ||
177 | |||
178 | /* Do necessary setup to start up a newly executed thread. */ | ||
179 | static inline void start_thread(struct pt_regs *regs, | ||
180 | unsigned long pc, unsigned long usp) | ||
181 | { | ||
182 | regs->pc = pc; | ||
183 | regs->sp = usp; | ||
184 | } | ||
185 | |||
186 | /* Free all resources held by a thread. */ | ||
187 | static inline void release_thread(struct task_struct *dead_task) | ||
188 | { | ||
189 | /* Nothing for now */ | ||
190 | } | ||
191 | |||
192 | /* Prepare to copy thread state - unlazy all lazy status. */ | ||
193 | #define prepare_to_copy(tsk) do { } while (0) | ||
194 | |||
195 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
196 | |||
197 | /* Helper routines for setting home cache modes at exec() time. */ | ||
198 | |||
199 | |||
200 | /* | ||
201 | * Return saved (kernel) PC of a blocked thread. | ||
202 | * Only used in a printk() in kernel/sched.c, so don't work too hard. | ||
203 | */ | ||
204 | #define thread_saved_pc(t) ((t)->thread.pc) | ||
205 | |||
206 | unsigned long get_wchan(struct task_struct *p); | ||
207 | |||
208 | /* Return initial ksp value for given task. */ | ||
209 | #define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE) | ||
210 | |||
211 | /* Return some info about the user process TASK. */ | ||
212 | #define KSTK_TOP(task) (task_ksp0(task) - STACK_TOP_DELTA) | ||
213 | #define task_pt_regs(task) \ | ||
214 | ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1) | ||
215 | #define task_sp(task) (task_pt_regs(task)->sp) | ||
216 | #define task_pc(task) (task_pt_regs(task)->pc) | ||
217 | /* Aliases for pc and sp (used in fs/proc/array.c) */ | ||
218 | #define KSTK_EIP(task) task_pc(task) | ||
219 | #define KSTK_ESP(task) task_sp(task) | ||
220 | |||
221 | /* Standard format for printing registers and other word-size data. */ | ||
222 | #ifdef __tilegx__ | ||
223 | # define REGFMT "0x%016lx" | ||
224 | #else | ||
225 | # define REGFMT "0x%08lx" | ||
226 | #endif | ||
227 | |||
228 | /* | ||
229 | * Do some slow action (e.g. read a slow SPR). | ||
230 | * Note that this must also have compiler-barrier semantics since | ||
231 | * it may be used in a busy loop reading memory. | ||
232 | */ | ||
233 | static inline void cpu_relax(void) | ||
234 | { | ||
235 | __insn_mfspr(SPR_PASS); | ||
236 | barrier(); | ||
237 | } | ||
238 | |||
239 | struct siginfo; | ||
240 | extern void arch_coredump_signal(struct siginfo *, struct pt_regs *); | ||
241 | #define arch_coredump_signal arch_coredump_signal | ||
242 | |||
243 | /* Provide information about the chip model. */ | ||
244 | extern char chip_model[64]; | ||
245 | |||
246 | /* Data on which physical memory controller corresponds to which NUMA node. */ | ||
247 | extern int node_controller[]; | ||
248 | |||
249 | |||
250 | /* Do we dump information to the console when a user application crashes? */ | ||
251 | extern int show_crashinfo; | ||
252 | |||
253 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
254 | /* Does the heap allocator return hash-for-home pages by default? */ | ||
255 | extern int hash_default; | ||
256 | |||
257 | /* Should kernel stack pages be hash-for-home? */ | ||
258 | extern int kstack_hash; | ||
259 | #else | ||
260 | #define hash_default 0 | ||
261 | #define kstack_hash 0 | ||
262 | #endif | ||
263 | |||
264 | /* Are we using huge pages in the TLB for kernel data? */ | ||
265 | extern int kdata_huge; | ||
266 | |||
267 | /* | ||
268 | * Note that with OLOC the prefetch will return an unused read word to | ||
269 | * the issuing tile, which will cause some MDN traffic. Benchmarking | ||
271 | * should be done to see whether this cost outweighs the benefit of prefetching. | ||
271 | */ | ||
272 | #define ARCH_HAS_PREFETCH | ||
273 | #define ARCH_HAS_PREFETCHW | ||
274 | #define ARCH_HAS_SPINLOCK_PREFETCH | ||
275 | |||
276 | #define prefetch(ptr) __builtin_prefetch((ptr), 0, 3) | ||
277 | #define prefetchw(ptr) __builtin_prefetch((ptr), 1, 3) | ||
278 | |||
279 | #ifdef CONFIG_SMP | ||
280 | #define spin_lock_prefetch(ptr) prefetchw(ptr) | ||
281 | #else | ||
282 | /* Nothing to prefetch. */ | ||
283 | #define spin_lock_prefetch(lock) do { } while (0) | ||
284 | #endif | ||
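prefetch() and prefetchw() map directly onto the compiler builtin shown above. A trivial user-space usage sketch (the array, prefetch distance and loop are illustrative only):

#include <stdio.h>

#define prefetch(ptr)	__builtin_prefetch((ptr), 0, 3)
#define prefetchw(ptr)	__builtin_prefetch((ptr), 1, 3)

int main(void)
{
	static int data[1024];
	long sum = 0;
	int i;

	for (i = 0; i < 1024; i++) {
		/* Hint at data we will touch a little later. */
		if (i + 16 < 1024)
			prefetch(&data[i + 16]);
		sum += data[i];
	}
	printf("sum = %ld\n", sum);
	return 0;
}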
285 | |||
286 | #else /* __ASSEMBLY__ */ | ||
287 | |||
288 | /* Do some slow action (e.g. read a slow SPR). */ | ||
289 | #define CPU_RELAX mfspr zero, SPR_PASS | ||
290 | |||
291 | #endif /* !__ASSEMBLY__ */ | ||
292 | |||
293 | /* Assembly code assumes that the PL is in the low bits. */ | ||
294 | #if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0 | ||
295 | # error Fix assembly assumptions about PL | ||
296 | #endif | ||
297 | |||
298 | /* We sometimes use these macros for EX_CONTEXT_0_1 as well. */ | ||
299 | #if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \ | ||
300 | SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \ | ||
301 | SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \ | ||
302 | SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK | ||
303 | # error Fix assumptions that EX1 macros work for both PL0 and PL1 | ||
304 | #endif | ||
305 | |||
306 | /* Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT. */ | ||
307 | #define EX1_PL(ex1) \ | ||
308 | (((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK) | ||
309 | #define EX1_ICS(ex1) \ | ||
310 | (((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK) | ||
311 | #define PL_ICS_EX1(pl, ics) \ | ||
312 | (((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \ | ||
313 | ((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT)) | ||
314 | |||
315 | /* | ||
316 | * Provide symbolic constants for PLs. | ||
317 | * Note that assembly code assumes that USER_PL is zero. | ||
318 | */ | ||
319 | #define USER_PL 0 | ||
320 | #define KERNEL_PL 1 | ||
321 | |||
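EX1_PL(), EX1_ICS() and PL_ICS_EX1() above are plain shift-and-mask helpers. A round-trip check with assumed field positions (PL in the low bits is guaranteed by the #error check above; the ICS position and the mask widths here are illustrative, the real constants come from <arch/spr_def.h>):

#include <stdio.h>

/* Assumed field layout; the real constants come from <arch/spr_def.h>. */
#define SPR_EX_CONTEXT_1_1__PL_SHIFT	0
#define SPR_EX_CONTEXT_1_1__PL_RMASK	0x3
#define SPR_EX_CONTEXT_1_1__ICS_SHIFT	2
#define SPR_EX_CONTEXT_1_1__ICS_RMASK	0x1

#define EX1_PL(ex1) \
	(((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK)
#define EX1_ICS(ex1) \
	(((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK)
#define PL_ICS_EX1(pl, ics) \
	(((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \
	 ((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT))

#define USER_PL		0
#define KERNEL_PL	1

int main(void)
{
	/* Build an EX_CONTEXT value for kernel PL with ICS set... */
	unsigned long ex1 = PL_ICS_EX1(KERNEL_PL, 1);

	/* ...and pull the two fields back apart. */
	printf("ex1 = %#lx, PL = %lu, ICS = %lu\n", ex1,
	       (unsigned long)EX1_PL(ex1), (unsigned long)EX1_ICS(ex1));
	return 0;
}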
322 | /* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */ | ||
323 | #define CPU_LOG_MASK_VALUE 12 | ||
324 | #define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1) | ||
325 | #if CONFIG_NR_CPUS > CPU_MASK_VALUE | ||
326 | # error Too many cpus! | ||
327 | #endif | ||
328 | #define raw_smp_processor_id() \ | ||
329 | ((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE) | ||
330 | #define get_current_ksp0() \ | ||
331 | (__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE) | ||
332 | #define next_current_ksp0(task) ({ \ | ||
333 | unsigned long __ksp0 = task_ksp0(task); \ | ||
334 | int __cpu = raw_smp_processor_id(); \ | ||
335 | BUG_ON(__ksp0 & CPU_MASK_VALUE); \ | ||
336 | __ksp0 | __cpu; \ | ||
337 | }) | ||
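SYSTEM_SAVE_1_0 packs the per-cpu kernel stack top and the cpu number into one word: task_ksp0() is aligned to at least 1 << CPU_LOG_MASK_VALUE bytes, so the cpu number fits in the low bits, exactly as the BUG_ON above insists. A user-space sketch of the same pack/unpack (the ksp0 address is made up but suitably aligned):

#include <stdio.h>

#define CPU_LOG_MASK_VALUE	12
#define CPU_MASK_VALUE		((1 << CPU_LOG_MASK_VALUE) - 1)

int main(void)
{
	unsigned long ksp0 = 0xfd008000UL;	/* made-up, 4KB-aligned stack top */
	int cpu = 37;

	/* Pack, as next_current_ksp0() does (low bits of ksp0 must be clear). */
	unsigned long spr = ksp0 | cpu;

	/* Unpack, as raw_smp_processor_id() and get_current_ksp0() do. */
	printf("cpu  = %d\n", (int)(spr & CPU_MASK_VALUE));
	printf("ksp0 = 0x%08lx\n", spr & ~(unsigned long)CPU_MASK_VALUE);
	return 0;
}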
338 | |||
339 | #endif /* _ASM_TILE_PROCESSOR_H */ | ||
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h new file mode 100644 index 000000000000..4d1d9953016a --- /dev/null +++ b/arch/tile/include/asm/ptrace.h | |||
@@ -0,0 +1,163 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PTRACE_H | ||
16 | #define _ASM_TILE_PTRACE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | #include <arch/abi.h> | ||
20 | |||
21 | /* These must match struct pt_regs, below. */ | ||
22 | #if CHIP_WORD_SIZE() == 32 | ||
23 | #define PTREGS_OFFSET_REG(n) ((n)*4) | ||
24 | #else | ||
25 | #define PTREGS_OFFSET_REG(n) ((n)*8) | ||
26 | #endif | ||
27 | #define PTREGS_OFFSET_BASE 0 | ||
28 | #define PTREGS_OFFSET_TP PTREGS_OFFSET_REG(53) | ||
29 | #define PTREGS_OFFSET_SP PTREGS_OFFSET_REG(54) | ||
30 | #define PTREGS_OFFSET_LR PTREGS_OFFSET_REG(55) | ||
31 | #define PTREGS_NR_GPRS 56 | ||
32 | #define PTREGS_OFFSET_PC PTREGS_OFFSET_REG(56) | ||
33 | #define PTREGS_OFFSET_EX1 PTREGS_OFFSET_REG(57) | ||
34 | #define PTREGS_OFFSET_FAULTNUM PTREGS_OFFSET_REG(58) | ||
35 | #define PTREGS_OFFSET_ORIG_R0 PTREGS_OFFSET_REG(59) | ||
36 | #define PTREGS_OFFSET_FLAGS PTREGS_OFFSET_REG(60) | ||
37 | #if CHIP_HAS_CMPEXCH() | ||
38 | #define PTREGS_OFFSET_CMPEXCH PTREGS_OFFSET_REG(61) | ||
39 | #endif | ||
40 | #define PTREGS_SIZE PTREGS_OFFSET_REG(64) | ||
41 | |||
42 | #ifndef __ASSEMBLY__ | ||
43 | |||
44 | #ifdef __KERNEL__ | ||
45 | /* Benefit from consistent use of "long" on all chips. */ | ||
46 | typedef unsigned long pt_reg_t; | ||
47 | #else | ||
48 | /* Provide appropriate length type to userspace regardless of -m32/-m64. */ | ||
49 | typedef uint_reg_t pt_reg_t; | ||
50 | #endif | ||
51 | |||
52 | /* | ||
53 | * This struct defines the way the registers are stored on the stack during a | ||
54 | * system call/exception. It should be a multiple of 8 bytes to preserve | ||
55 | * normal stack alignment rules. | ||
56 | * | ||
57 | * Must track <sys/ucontext.h> and <sys/procfs.h> | ||
58 | */ | ||
59 | struct pt_regs { | ||
60 | /* Saved main processor registers; 56..63 are special. */ | ||
61 | /* tp, sp, and lr must immediately follow regs[] for aliasing. */ | ||
62 | pt_reg_t regs[53]; | ||
63 | pt_reg_t tp; /* aliases regs[TREG_TP] */ | ||
64 | pt_reg_t sp; /* aliases regs[TREG_SP] */ | ||
65 | pt_reg_t lr; /* aliases regs[TREG_LR] */ | ||
66 | |||
67 | /* Saved special registers. */ | ||
68 | pt_reg_t pc; /* stored in EX_CONTEXT_1_0 */ | ||
69 | pt_reg_t ex1; /* stored in EX_CONTEXT_1_1 (PL and ICS bit) */ | ||
70 | pt_reg_t faultnum; /* fault number (INT_SWINT_1 for syscall) */ | ||
71 | pt_reg_t orig_r0; /* r0 at syscall entry, else zero */ | ||
72 | pt_reg_t flags; /* flags (see below) */ | ||
73 | #if !CHIP_HAS_CMPEXCH() | ||
74 | pt_reg_t pad[3]; | ||
75 | #else | ||
76 | pt_reg_t cmpexch; /* value of CMPEXCH_VALUE SPR at interrupt */ | ||
77 | pt_reg_t pad[2]; | ||
78 | #endif | ||
79 | }; | ||
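The PTREGS_OFFSET_* constants above must stay in lockstep with this struct. A quick consistency check against a simplified replica, assuming the 32-bit case (4-byte pt_reg_t, no CMPEXCH):

#include <stdio.h>
#include <stddef.h>

typedef unsigned int pt_reg_t;		/* 4 bytes: the 32-bit case */

#define PTREGS_OFFSET_REG(n)	((n) * 4)
#define PTREGS_OFFSET_SP	PTREGS_OFFSET_REG(54)
#define PTREGS_OFFSET_PC	PTREGS_OFFSET_REG(56)
#define PTREGS_SIZE		PTREGS_OFFSET_REG(64)

/* Simplified replica of struct pt_regs (no-CMPEXCH variant). */
struct pt_regs {
	pt_reg_t regs[53];
	pt_reg_t tp, sp, lr;
	pt_reg_t pc, ex1, faultnum, orig_r0, flags;
	pt_reg_t pad[3];
};

int main(void)
{
	printf("sp offset: %zu (macro says %d)\n",
	       offsetof(struct pt_regs, sp), PTREGS_OFFSET_SP);
	printf("pc offset: %zu (macro says %d)\n",
	       offsetof(struct pt_regs, pc), PTREGS_OFFSET_PC);
	printf("size:      %zu (macro says %d)\n",
	       sizeof(struct pt_regs), PTREGS_SIZE);
	return 0;
}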
80 | |||
81 | #endif /* __ASSEMBLY__ */ | ||
82 | |||
83 | /* Flag bits in pt_regs.flags */ | ||
84 | #define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */ | ||
85 | #define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */ | ||
86 | #define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */ | ||
87 | |||
88 | #define PTRACE_GETREGS 12 | ||
89 | #define PTRACE_SETREGS 13 | ||
90 | #define PTRACE_GETFPREGS 14 | ||
91 | #define PTRACE_SETFPREGS 15 | ||
92 | |||
93 | /* Support TILE-specific ptrace options, with events starting at 16. */ | ||
94 | #define PTRACE_O_TRACEMIGRATE 0x00010000 | ||
95 | #define PTRACE_EVENT_MIGRATE 16 | ||
96 | #ifdef __KERNEL__ | ||
97 | #define PTRACE_O_MASK_TILE (PTRACE_O_TRACEMIGRATE) | ||
98 | #define PT_TRACE_MIGRATE 0x00080000 | ||
99 | #define PT_TRACE_MASK_TILE (PT_TRACE_MIGRATE) | ||
100 | #endif | ||
101 | |||
102 | #ifdef __KERNEL__ | ||
103 | |||
104 | #ifndef __ASSEMBLY__ | ||
105 | |||
106 | #define instruction_pointer(regs) ((regs)->pc) | ||
107 | #define profile_pc(regs) instruction_pointer(regs) | ||
108 | |||
109 | /* Does the process account for user or for system time? */ | ||
110 | #define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL) | ||
111 | |||
112 | /* Fill in a struct pt_regs with the current kernel registers. */ | ||
113 | struct pt_regs *get_pt_regs(struct pt_regs *); | ||
114 | |||
115 | extern void show_regs(struct pt_regs *); | ||
116 | |||
117 | #define arch_has_single_step() (1) | ||
118 | |||
119 | /* | ||
120 | * A structure for all single-stepper state. | ||
121 | * | ||
122 | * Also update defines in assembler section if it changes | ||
123 | */ | ||
124 | struct single_step_state { | ||
125 | /* the page to which we will write hacked-up bundles */ | ||
126 | void *buffer; | ||
127 | |||
128 | union { | ||
129 | int flags; | ||
130 | struct { | ||
131 | unsigned long is_enabled:1, update:1, update_reg:6; | ||
132 | }; | ||
133 | }; | ||
134 | |||
135 | unsigned long orig_pc; /* the original PC */ | ||
136 | unsigned long next_pc; /* return PC if no branch (PC + 1) */ | ||
137 | unsigned long branch_next_pc; /* return PC if we did branch/jump */ | ||
138 | unsigned long update_value; /* value to restore to update_target */ | ||
139 | }; | ||
140 | |||
141 | /* Single-step the instruction at regs->pc */ | ||
142 | extern void single_step_once(struct pt_regs *regs); | ||
143 | |||
144 | struct task_struct; | ||
145 | |||
146 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, | ||
147 | int error_code); | ||
148 | |||
149 | #ifdef __tilegx__ | ||
150 | /* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */ | ||
151 | #define __ARCH_WANT_COMPAT_SYS_PTRACE | ||
152 | #endif | ||
153 | |||
154 | #endif /* !__ASSEMBLY__ */ | ||
155 | |||
156 | #define SINGLESTEP_STATE_MASK_IS_ENABLED 0x1 | ||
157 | #define SINGLESTEP_STATE_MASK_UPDATE 0x2 | ||
158 | #define SINGLESTEP_STATE_TARGET_LB 2 | ||
159 | #define SINGLESTEP_STATE_TARGET_UB 7 | ||
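These assembler-visible constants mirror the C bitfield in struct single_step_state above: is_enabled is bit 0, update is bit 1, and update_reg occupies bits [7:2]. A quick user-space check of that correspondence (it assumes the usual LSB-first bitfield layout of little-endian GCC targets):

#include <stdio.h>

struct step_flags {
	union {
		unsigned int flags;
		struct {
			unsigned int is_enabled:1, update:1, update_reg:6;
		};
	};
};

#define SINGLESTEP_STATE_MASK_IS_ENABLED	0x1
#define SINGLESTEP_STATE_MASK_UPDATE		0x2
#define SINGLESTEP_STATE_TARGET_LB		2
#define SINGLESTEP_STATE_TARGET_UB		7

int main(void)
{
	struct step_flags f = { 0 };

	f.is_enabled = 1;
	f.update = 1;
	f.update_reg = 45;	/* some register number */

	/* Bits 0 and 1 match the MASK defines; bits [7:2] hold update_reg. */
	printf("flags = %#x\n", f.flags);
	printf("enabled = %u, update = %u, reg = %u\n",
	       f.flags & SINGLESTEP_STATE_MASK_IS_ENABLED,
	       (f.flags & SINGLESTEP_STATE_MASK_UPDATE) >> 1,
	       (f.flags >> SINGLESTEP_STATE_TARGET_LB) &
	       ((1u << (SINGLESTEP_STATE_TARGET_UB -
			SINGLESTEP_STATE_TARGET_LB + 1)) - 1));
	return 0;
}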
160 | |||
161 | #endif /* __KERNEL__ */ | ||
162 | |||
163 | #endif /* _ASM_TILE_PTRACE_H */ | ||
diff --git a/arch/tile/include/asm/resource.h b/arch/tile/include/asm/resource.h new file mode 100644 index 000000000000..04bc4db8921b --- /dev/null +++ b/arch/tile/include/asm/resource.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/resource.h> | |||
diff --git a/arch/tile/include/asm/scatterlist.h b/arch/tile/include/asm/scatterlist.h new file mode 100644 index 000000000000..c5604242c0d5 --- /dev/null +++ b/arch/tile/include/asm/scatterlist.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SCATTERLIST_H | ||
16 | #define _ASM_TILE_SCATTERLIST_H | ||
17 | |||
18 | #define ISA_DMA_THRESHOLD (~0UL) | ||
19 | |||
20 | #include <asm-generic/scatterlist.h> | ||
21 | |||
22 | #endif /* _ASM_TILE_SCATTERLIST_H */ | ||
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h new file mode 100644 index 000000000000..6c111491f0ed --- /dev/null +++ b/arch/tile/include/asm/sections.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SECTIONS_H | ||
16 | #define _ASM_TILE_SECTIONS_H | ||
17 | |||
18 | #define arch_is_kernel_data arch_is_kernel_data | ||
19 | |||
20 | #include <asm-generic/sections.h> | ||
21 | |||
22 | /* Text and data are in different areas of the kernel VA space. */ | ||
23 | extern char _sinitdata[], _einitdata[]; | ||
24 | |||
25 | /* Write-once data is writable only till the end of initialization. */ | ||
26 | extern char __w1data_begin[], __w1data_end[]; | ||
27 | |||
28 | extern char __feedback_section_start[], __feedback_section_end[]; | ||
29 | |||
30 | /* Handle the discontiguity between _sdata and _stext. */ | ||
31 | static inline int arch_is_kernel_data(unsigned long addr) | ||
32 | { | ||
33 | return addr >= (unsigned long)_sdata && | ||
34 | addr < (unsigned long)_end; | ||
35 | } | ||
36 | |||
37 | #endif /* _ASM_TILE_SECTIONS_H */ | ||
diff --git a/arch/tile/include/asm/sembuf.h b/arch/tile/include/asm/sembuf.h new file mode 100644 index 000000000000..7673b83cfef7 --- /dev/null +++ b/arch/tile/include/asm/sembuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/sembuf.h> | |||
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h new file mode 100644 index 000000000000..823ddd47ff6e --- /dev/null +++ b/arch/tile/include/asm/setup.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SETUP_H | ||
16 | #define _ASM_TILE_SETUP_H | ||
17 | |||
18 | #include <linux/pfn.h> | ||
19 | #include <linux/init.h> | ||
20 | |||
21 | /* | ||
22 | * Reserved space for vmalloc and iomap - defined in asm/page.h | ||
23 | */ | ||
24 | #define MAXMEM_PFN PFN_DOWN(MAXMEM) | ||
25 | |||
26 | #define COMMAND_LINE_SIZE 2048 | ||
27 | |||
28 | void early_panic(const char *fmt, ...); | ||
29 | void warn_early_printk(void); | ||
30 | void __init disable_early_printk(void); | ||
31 | |||
32 | #endif /* _ASM_TILE_SETUP_H */ | ||
diff --git a/arch/tile/include/asm/shmbuf.h b/arch/tile/include/asm/shmbuf.h new file mode 100644 index 000000000000..83c05fc2de38 --- /dev/null +++ b/arch/tile/include/asm/shmbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/shmbuf.h> | |||
diff --git a/arch/tile/include/asm/shmparam.h b/arch/tile/include/asm/shmparam.h new file mode 100644 index 000000000000..93f30deb95d0 --- /dev/null +++ b/arch/tile/include/asm/shmparam.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/shmparam.h> | |||
diff --git a/arch/tile/include/asm/sigcontext.h b/arch/tile/include/asm/sigcontext.h new file mode 100644 index 000000000000..7cd7672e3ad4 --- /dev/null +++ b/arch/tile/include/asm/sigcontext.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGCONTEXT_H | ||
16 | #define _ASM_TILE_SIGCONTEXT_H | ||
17 | |||
18 | /* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ | ||
19 | #include <asm/ptrace.h> | ||
20 | |||
21 | /* Must track <sys/ucontext.h> */ | ||
22 | |||
23 | struct sigcontext { | ||
24 | struct pt_regs regs; | ||
25 | }; | ||
26 | |||
27 | #endif /* _ASM_TILE_SIGCONTEXT_H */ | ||
diff --git a/arch/tile/include/asm/sigframe.h b/arch/tile/include/asm/sigframe.h new file mode 100644 index 000000000000..994d3d30205f --- /dev/null +++ b/arch/tile/include/asm/sigframe.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGFRAME_H | ||
16 | #define _ASM_TILE_SIGFRAME_H | ||
17 | |||
18 | /* Indicate that syscall return should not examine r0 */ | ||
19 | #define INT_SWINT_1_SIGRETURN (~0) | ||
20 | |||
21 | #ifndef __ASSEMBLY__ | ||
22 | |||
23 | #include <arch/abi.h> | ||
24 | |||
25 | struct rt_sigframe { | ||
26 | unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */ | ||
27 | struct siginfo info; | ||
28 | struct ucontext uc; | ||
29 | }; | ||
30 | |||
31 | #endif /* !__ASSEMBLY__ */ | ||
32 | |||
33 | #endif /* _ASM_TILE_SIGFRAME_H */ | ||
diff --git a/arch/tile/include/asm/siginfo.h b/arch/tile/include/asm/siginfo.h new file mode 100644 index 000000000000..0c12d1b9ddf2 --- /dev/null +++ b/arch/tile/include/asm/siginfo.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGINFO_H | ||
16 | #define _ASM_TILE_SIGINFO_H | ||
17 | |||
18 | #define __ARCH_SI_TRAPNO | ||
19 | |||
20 | #include <asm-generic/siginfo.h> | ||
21 | |||
22 | /* | ||
23 | * Additional Tile-specific SIGILL si_codes | ||
24 | */ | ||
25 | #define ILL_DBLFLT (__SI_FAULT|9) /* double fault */ | ||
26 | #define ILL_HARDWALL (__SI_FAULT|10) /* user networks hardwall violation */ | ||
27 | #undef NSIGILL | ||
28 | #define NSIGILL 10 | ||
29 | |||
30 | #endif /* _ASM_TILE_SIGINFO_H */ | ||
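The two extra SIGILL codes above reach user space as plain si_code integers once the kernel strips its internal __SI_FAULT tag, so a handler would compare against the small values 9 and 10. A minimal user-space sketch under that assumption (handler name and values are illustrative, not part of this patch):

	#include <signal.h>

	static volatile sig_atomic_t last_sigill_code;

	/* SA_SIGINFO handler recording which tile-specific SIGILL code fired. */
	static void sigill_handler(int sig, siginfo_t *info, void *ctx)
	{
		last_sigill_code = info->si_code;  /* 9 == ILL_DBLFLT, 10 == ILL_HARDWALL */
	}

	int main(void)
	{
		struct sigaction sa = { 0 };
		sa.sa_sigaction = sigill_handler;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGILL, &sa, 0);
		/* ... run code that may raise SIGILL ... */
		return 0;
	}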
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h new file mode 100644 index 000000000000..d20d326d201b --- /dev/null +++ b/arch/tile/include/asm/signal.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGNAL_H | ||
16 | #define _ASM_TILE_SIGNAL_H | ||
17 | |||
18 | /* Do not notify a ptracer when this signal is handled. */ | ||
19 | #define SA_NOPTRACE 0x02000000u | ||
20 | |||
21 | /* Used in earlier Tilera releases, so it is kept for binary compatibility. */ | ||
22 | #define SA_RESTORER 0x04000000u | ||
23 | |||
24 | #include <asm-generic/signal.h> | ||
25 | |||
26 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | ||
27 | int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *); | ||
28 | int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); | ||
29 | #endif | ||
30 | |||
31 | #endif /* _ASM_TILE_SIGNAL_H */ | ||
diff --git a/arch/tile/include/asm/smp.h b/arch/tile/include/asm/smp.h new file mode 100644 index 000000000000..da24858a7392 --- /dev/null +++ b/arch/tile/include/asm/smp.h | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SMP_H | ||
16 | #define _ASM_TILE_SMP_H | ||
17 | |||
18 | #ifdef CONFIG_SMP | ||
19 | |||
20 | #include <asm/processor.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | #include <linux/irqreturn.h> | ||
23 | |||
24 | /* Set up this tile to support receiving hypervisor messages */ | ||
25 | void init_messaging(void); | ||
26 | |||
27 | /* Set up this tile to support receiving device interrupts and IPIs. */ | ||
28 | void init_per_tile_IRQs(void); | ||
29 | |||
30 | /* Send a message to processors specified in mask */ | ||
31 | void send_IPI_many(const struct cpumask *mask, int tag); | ||
32 | |||
33 | /* Send a message to all but the sending processor */ | ||
34 | void send_IPI_allbutself(int tag); | ||
35 | |||
36 | /* Send a message to a specific processor */ | ||
37 | void send_IPI_single(int dest, int tag); | ||
38 | |||
39 | /* Process an IPI message */ | ||
40 | void evaluate_message(int tag); | ||
41 | |||
42 | /* Process an IRQ_RESCHEDULE IPI. */ | ||
43 | irqreturn_t handle_reschedule_ipi(int irq, void *token); | ||
44 | |||
45 | /* Boot a secondary cpu */ | ||
46 | void online_secondary(void); | ||
47 | |||
48 | /* Call a function on a specified set of CPUs (may include this one). */ | ||
49 | extern void on_each_cpu_mask(const struct cpumask *mask, | ||
50 | void (*func)(void *), void *info, bool wait); | ||
51 | |||
52 | /* Topology of the supervisor tile grid, and coordinates of boot processor */ | ||
53 | extern HV_Topology smp_topology; | ||
54 | |||
55 | /* Accessors for grid size */ | ||
56 | #define smp_height (smp_topology.height) | ||
57 | #define smp_width (smp_topology.width) | ||
58 | |||
59 | /* Hypervisor message tags sent via the tile send_IPI*() routines. */ | ||
60 | #define MSG_TAG_START_CPU 1 | ||
61 | #define MSG_TAG_STOP_CPU 2 | ||
62 | #define MSG_TAG_CALL_FUNCTION_MANY 3 | ||
63 | #define MSG_TAG_CALL_FUNCTION_SINGLE 4 | ||
64 | |||
65 | /* Hook for the generic smp_call_function_many() routine. */ | ||
66 | static inline void arch_send_call_function_ipi_mask(struct cpumask *mask) | ||
67 | { | ||
68 | send_IPI_many(mask, MSG_TAG_CALL_FUNCTION_MANY); | ||
69 | } | ||
70 | |||
71 | /* Hook for the generic smp_call_function_single() routine. */ | ||
72 | static inline void arch_send_call_function_single_ipi(int cpu) | ||
73 | { | ||
74 | send_IPI_single(cpu, MSG_TAG_CALL_FUNCTION_SINGLE); | ||
75 | } | ||
76 | |||
77 | /* Print out the boot string describing which cpus were disabled. */ | ||
78 | void print_disabled_cpus(void); | ||
79 | |||
80 | #else /* !CONFIG_SMP */ | ||
81 | |||
82 | #define on_each_cpu_mask(mask, func, info, wait) \ | ||
83 | do { if (cpumask_test_cpu(0, (mask))) func(info); } while (0) | ||
84 | |||
85 | #define smp_master_cpu 0 | ||
86 | #define smp_height 1 | ||
87 | #define smp_width 1 | ||
88 | |||
89 | #endif /* !CONFIG_SMP */ | ||
90 | |||
91 | |||
92 | /* Which cpus may be used as the lotar in a page table entry. */ | ||
93 | extern struct cpumask cpu_lotar_map; | ||
94 | #define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map) | ||
95 | |||
96 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
97 | /* Which processors are used for hash-for-home mapping */ | ||
98 | extern struct cpumask hash_for_home_map; | ||
99 | #endif | ||
100 | |||
101 | /* Which cpus can have their cache flushed by hv_flush_remote(). */ | ||
102 | extern struct cpumask cpu_cacheable_map; | ||
103 | #define cpu_cacheable(cpu) cpumask_test_cpu((cpu), &cpu_cacheable_map) | ||
104 | |||
105 | /* Convert an HV_LOTAR value into a cpu. */ | ||
106 | static inline int hv_lotar_to_cpu(HV_LOTAR lotar) | ||
107 | { | ||
108 | return HV_LOTAR_X(lotar) + (HV_LOTAR_Y(lotar) * smp_width); | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Extension of <linux/cpumask.h> functionality when you just want | ||
113 | * to express a mask or suppression or inclusion region without | ||
114 | * being too concerned about exactly which cpus are valid in that region. | ||
115 | */ | ||
116 | int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits); | ||
117 | |||
118 | #define cpulist_parse_crop(buf, dst) \ | ||
119 | __cpulist_parse_crop((buf), (dst), NR_CPUS) | ||
120 | static inline int __cpulist_parse_crop(const char *buf, struct cpumask *dstp, | ||
121 | int nbits) | ||
122 | { | ||
123 | return bitmap_parselist_crop(buf, cpumask_bits(dstp), nbits); | ||
124 | } | ||
125 | |||
126 | #endif /* _ASM_TILE_SMP_H */ | ||
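A standalone sketch of the lotar-to-cpu mapping computed by hv_lotar_to_cpu() above (cpu = x + y * width); the 8x8 grid and the raw x/y values stand in for the real HV_LOTAR_X()/HV_LOTAR_Y() accessors and smp_topology, which are hypervisor-provided:

	#include <stdio.h>

	int main(void)
	{
		const int width = 8, height = 8;   /* hypothetical tile grid */
		int x = 3, y = 5;
		int cpu = x + y * width;           /* mirrors hv_lotar_to_cpu() */

		printf("tile (%d,%d) -> cpu %d of %d\n", x, y, cpu, width * height);
		return 0;
	}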
diff --git a/arch/tile/include/asm/socket.h b/arch/tile/include/asm/socket.h new file mode 100644 index 000000000000..6b71384b9d8b --- /dev/null +++ b/arch/tile/include/asm/socket.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/socket.h> | |||
diff --git a/arch/tile/include/asm/sockios.h b/arch/tile/include/asm/sockios.h new file mode 100644 index 000000000000..def6d4746ee7 --- /dev/null +++ b/arch/tile/include/asm/sockios.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/sockios.h> | |||
diff --git a/arch/tile/include/asm/spinlock.h b/arch/tile/include/asm/spinlock.h new file mode 100644 index 000000000000..1a8bd4740c28 --- /dev/null +++ b/arch/tile/include/asm/spinlock.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SPINLOCK_H | ||
16 | #define _ASM_TILE_SPINLOCK_H | ||
17 | |||
18 | #ifdef __tilegx__ | ||
19 | #include <asm/spinlock_64.h> | ||
20 | #else | ||
21 | #include <asm/spinlock_32.h> | ||
22 | #endif | ||
23 | |||
24 | #endif /* _ASM_TILE_SPINLOCK_H */ | ||
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h new file mode 100644 index 000000000000..f3a8473c68da --- /dev/null +++ b/arch/tile/include/asm/spinlock_32.h | |||
@@ -0,0 +1,200 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * 32-bit SMP spinlocks. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_SPINLOCK_32_H | ||
18 | #define _ASM_TILE_SPINLOCK_32_H | ||
19 | |||
20 | #include <asm/atomic.h> | ||
21 | #include <asm/page.h> | ||
22 | #include <asm/system.h> | ||
23 | #include <linux/compiler.h> | ||
24 | |||
25 | /* | ||
26 | * We only use even ticket numbers so the '1' inserted by a tns is | ||
27 | * an unambiguous "ticket is busy" flag. | ||
28 | */ | ||
29 | #define TICKET_QUANTUM 2 | ||
30 | |||
31 | |||
32 | /* | ||
33 | * SMP ticket spinlocks, allowing only a single CPU anywhere | ||
34 | * | ||
35 | * (the type definitions are in asm/spinlock_types.h) | ||
36 | */ | ||
37 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) | ||
38 | { | ||
39 | /* | ||
40 | * Note that even if a new ticket is in the process of being | ||
41 | * acquired, so lock->next_ticket is 1, it's still reasonable | ||
42 | * to claim the lock is held, since it will be momentarily | ||
43 | * if not already. There's no need to wait for a "valid" | ||
44 | * lock->next_ticket to become available. | ||
45 | */ | ||
46 | return lock->next_ticket != lock->current_ticket; | ||
47 | } | ||
48 | |||
49 | void arch_spin_lock(arch_spinlock_t *lock); | ||
50 | |||
51 | /* We cannot take an interrupt after getting a ticket, so don't enable them. */ | ||
52 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
53 | |||
54 | int arch_spin_trylock(arch_spinlock_t *lock); | ||
55 | |||
56 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | ||
57 | { | ||
58 | /* For efficiency, overlap fetching the old ticket with the wmb(). */ | ||
59 | int old_ticket = lock->current_ticket; | ||
60 | wmb(); /* guarantee anything modified under the lock is visible */ | ||
61 | lock->current_ticket = old_ticket + TICKET_QUANTUM; | ||
62 | } | ||
63 | |||
64 | void arch_spin_unlock_wait(arch_spinlock_t *lock); | ||
65 | |||
66 | /* | ||
67 | * Read-write spinlocks, allowing multiple readers | ||
68 | * but only one writer. | ||
69 | * | ||
70 | * We use a "tns/store-back" technique on a single word to manage | ||
71 | * the lock state, looping around to retry if the tns returns 1. | ||
72 | */ | ||
73 | |||
74 | /* Internal layout of the word; do not use. */ | ||
75 | #define _WR_NEXT_SHIFT 8 | ||
76 | #define _WR_CURR_SHIFT 16 | ||
77 | #define _WR_WIDTH 8 | ||
78 | #define _RD_COUNT_SHIFT 24 | ||
79 | #define _RD_COUNT_WIDTH 8 | ||
80 | |||
81 | /* Internal functions; do not use. */ | ||
82 | void arch_read_lock_slow(arch_rwlock_t *, u32); | ||
83 | int arch_read_trylock_slow(arch_rwlock_t *); | ||
84 | void arch_read_unlock_slow(arch_rwlock_t *); | ||
85 | void arch_write_lock_slow(arch_rwlock_t *, u32); | ||
86 | void arch_write_unlock_slow(arch_rwlock_t *, u32); | ||
87 | |||
88 | /** | ||
89 | * arch_read_can_lock() - would read_trylock() succeed? | ||
90 | */ | ||
91 | static inline int arch_read_can_lock(arch_rwlock_t *rwlock) | ||
92 | { | ||
93 | return (rwlock->lock << _RD_COUNT_WIDTH) == 0; | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * arch_write_can_lock() - would write_trylock() succeed? | ||
98 | */ | ||
99 | static inline int arch_write_can_lock(arch_rwlock_t *rwlock) | ||
100 | { | ||
101 | return rwlock->lock == 0; | ||
102 | } | ||
103 | |||
104 | /** | ||
105 | * arch_read_lock() - acquire a read lock. | ||
106 | */ | ||
107 | static inline void arch_read_lock(arch_rwlock_t *rwlock) | ||
108 | { | ||
109 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
110 | if (unlikely(val << _RD_COUNT_WIDTH)) { | ||
111 | arch_read_lock_slow(rwlock, val); | ||
112 | return; | ||
113 | } | ||
114 | rwlock->lock = val + (1 << _RD_COUNT_SHIFT); | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * arch_write_lock() - acquire a write lock. | ||
119 | */ | ||
120 | static inline void arch_write_lock(arch_rwlock_t *rwlock) | ||
121 | { | ||
122 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
123 | if (unlikely(val != 0)) { | ||
124 | arch_write_lock_slow(rwlock, val); | ||
125 | return; | ||
126 | } | ||
127 | rwlock->lock = 1 << _WR_NEXT_SHIFT; | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * arch_read_trylock() - try to acquire a read lock. | ||
132 | */ | ||
133 | static inline int arch_read_trylock(arch_rwlock_t *rwlock) | ||
134 | { | ||
135 | int locked; | ||
136 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
137 | if (unlikely(val & 1)) { | ||
138 | return arch_read_trylock_slow(rwlock); | ||
139 | } | ||
140 | locked = (val << _RD_COUNT_WIDTH) == 0; | ||
141 | rwlock->lock = val + (locked << _RD_COUNT_SHIFT); | ||
142 | return locked; | ||
143 | } | ||
144 | |||
145 | /** | ||
146 | * arch_write_trylock() - try to acquire a write lock. | ||
147 | */ | ||
148 | static inline int arch_write_trylock(arch_rwlock_t *rwlock) | ||
149 | { | ||
150 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
151 | |||
152 | /* | ||
153 | * If a tns is in progress, or there's a waiting or active locker, | ||
154 | * or active readers, we can't take the lock, so give up. | ||
155 | */ | ||
156 | if (unlikely(val != 0)) { | ||
157 | if (!(val & 1)) | ||
158 | rwlock->lock = val; | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | /* Set the "next" field to mark it locked. */ | ||
163 | rwlock->lock = 1 << _WR_NEXT_SHIFT; | ||
164 | return 1; | ||
165 | } | ||
166 | |||
167 | /** | ||
168 | * arch_read_unlock() - release a read lock. | ||
169 | */ | ||
170 | static inline void arch_read_unlock(arch_rwlock_t *rwlock) | ||
171 | { | ||
172 | u32 val; | ||
173 | mb(); /* guarantee anything modified under the lock is visible */ | ||
174 | val = __insn_tns((int *)&rwlock->lock); | ||
175 | if (unlikely(val & 1)) { | ||
176 | arch_read_unlock_slow(rwlock); | ||
177 | return; | ||
178 | } | ||
179 | rwlock->lock = val - (1 << _RD_COUNT_SHIFT); | ||
180 | } | ||
181 | |||
182 | /** | ||
183 | * arch_write_unlock() - release a write lock. | ||
184 | */ | ||
185 | static inline void arch_write_unlock(arch_rwlock_t *rwlock) | ||
186 | { | ||
187 | u32 val; | ||
188 | mb(); /* guarantee anything modified under the lock is visible */ | ||
189 | val = __insn_tns((int *)&rwlock->lock); | ||
190 | if (unlikely(val != (1 << _WR_NEXT_SHIFT))) { | ||
191 | arch_write_unlock_slow(rwlock, val); | ||
192 | return; | ||
193 | } | ||
194 | rwlock->lock = 0; | ||
195 | } | ||
196 | |||
197 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
198 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
199 | |||
200 | #endif /* _ASM_TILE_SPINLOCK_32_H */ | ||
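The rwlock fast paths above all hinge on the single-word layout (_WR_NEXT_SHIFT, _WR_CURR_SHIFT, _RD_COUNT_SHIFT). A standalone sketch that decodes a lock word into its fields, using made-up values purely for illustration:

	#include <stdio.h>
	#include <stdint.h>

	/* Decode the 32-bit rwlock word: bit 0 is the tns "busy" flag,
	 * bits 8-15 the writer "next" ticket, bits 16-23 the writer
	 * "current" ticket, bits 24-31 the reader count. */
	static void decode(uint32_t lock)
	{
		unsigned tns_busy = lock & 1;
		unsigned wr_next  = (lock >> 8)  & 0xff;
		unsigned wr_curr  = (lock >> 16) & 0xff;
		unsigned readers  = (lock >> 24) & 0xff;

		printf("%#10x: tns=%u wr_next=%u wr_curr=%u readers=%u\n",
		       lock, tns_busy, wr_next, wr_curr, readers);
	}

	int main(void)
	{
		decode(0);           /* unlocked */
		decode(1u << 8);     /* writer took the fast path */
		decode(2u << 24);    /* two readers hold the lock */
		return 0;
	}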
diff --git a/arch/tile/include/asm/spinlock_types.h b/arch/tile/include/asm/spinlock_types.h new file mode 100644 index 000000000000..a71f59b49c50 --- /dev/null +++ b/arch/tile/include/asm/spinlock_types.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SPINLOCK_TYPES_H | ||
16 | #define _ASM_TILE_SPINLOCK_TYPES_H | ||
17 | |||
18 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
19 | # error "please don't include this file directly" | ||
20 | #endif | ||
21 | |||
22 | #ifdef __tilegx__ | ||
23 | |||
24 | /* Low 15 bits are "next"; high 15 bits are "current". */ | ||
25 | typedef struct arch_spinlock { | ||
26 | unsigned int lock; | ||
27 | } arch_spinlock_t; | ||
28 | |||
29 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } | ||
30 | |||
31 | /* High bit is "writer owns"; low 31 bits are a count of readers. */ | ||
32 | typedef struct arch_rwlock { | ||
33 | unsigned int lock; | ||
34 | } arch_rwlock_t; | ||
35 | |||
36 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
37 | |||
38 | #else | ||
39 | |||
40 | typedef struct arch_spinlock { | ||
41 | /* Next ticket number to hand out. */ | ||
42 | int next_ticket; | ||
43 | /* The ticket number that currently owns this lock. */ | ||
44 | int current_ticket; | ||
45 | } arch_spinlock_t; | ||
46 | |||
47 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0, 0 } | ||
48 | |||
49 | /* | ||
50 | * Byte 0 for tns (only the low bit is used), byte 1 for ticket-lock "next", | ||
51 | * byte 2 for ticket-lock "current", byte 3 for reader count. | ||
52 | */ | ||
53 | typedef struct arch_rwlock { | ||
54 | unsigned int lock; | ||
55 | } arch_rwlock_t; | ||
56 | |||
57 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
58 | |||
59 | #endif | ||
60 | #endif /* _ASM_TILE_SPINLOCK_TYPES_H */ | ||
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h new file mode 100644 index 000000000000..864913bcfbc9 --- /dev/null +++ b/arch/tile/include/asm/stack.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_STACK_H | ||
16 | #define _ASM_TILE_STACK_H | ||
17 | |||
18 | #include <linux/types.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <asm/backtrace.h> | ||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | /* Everything we need to keep track of a backtrace iteration */ | ||
24 | struct KBacktraceIterator { | ||
25 | BacktraceIterator it; | ||
26 | struct task_struct *task; /* task we are backtracing */ | ||
27 | HV_PTE *pgtable; /* page table for user space access */ | ||
28 | int end; /* iteration complete. */ | ||
29 | int new_context; /* new context is starting */ | ||
30 | int profile; /* profiling, so stop on async intrpt */ | ||
31 | int verbose; /* printk extra info (don't want to | ||
32 | * do this for profiling) */ | ||
33 | int is_current; /* backtracing current task */ | ||
34 | }; | ||
35 | |||
36 | /* Iteration methods for kernel backtraces */ | ||
37 | |||
38 | /* | ||
39 | * Initialize a KBacktraceIterator from a task_struct, and optionally from | ||
40 | * a set of registers. If the registers are omitted, the process is | ||
41 | * assumed to be descheduled, and registers are read from the process's | ||
42 | * thread_struct and stack. "verbose" means to printk some additional | ||
43 | * information about fault handlers as we pass them on the stack. | ||
44 | */ | ||
45 | extern void KBacktraceIterator_init(struct KBacktraceIterator *kbt, | ||
46 | struct task_struct *, struct pt_regs *); | ||
47 | |||
48 | /* Initialize iterator based on current stack. */ | ||
49 | extern void KBacktraceIterator_init_current(struct KBacktraceIterator *kbt); | ||
50 | |||
51 | /* No more frames? */ | ||
52 | extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt); | ||
53 | |||
54 | /* Advance to the next frame. */ | ||
55 | extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt); | ||
56 | |||
57 | /* | ||
58 | * Dump stack given complete register info. Use only from the | ||
59 | * architecture-specific code; show_stack() | ||
60 | * and dump_stack() (in entry.S) are architecture-independent entry points. | ||
61 | */ | ||
62 | extern void tile_show_stack(struct KBacktraceIterator *, int headers); | ||
63 | |||
64 | /* Dump stack of current process, with registers to seed the backtrace. */ | ||
65 | extern void dump_stack_regs(struct pt_regs *); | ||
66 | |||
67 | |||
68 | #endif /* _ASM_TILE_STACK_H */ | ||
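A hedged sketch of how the iterator above is typically driven, mirroring the init/end/next protocol; the function name, the printing, and the use of the embedded BacktraceIterator's pc field are illustrative only:

	/* Walk the current task's kernel stack and print each frame's PC. */
	static void example_backtrace_current(void)
	{
		struct KBacktraceIterator kbt;

		KBacktraceIterator_init_current(&kbt);
		for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
			pr_info("  frame pc %#lx\n", (unsigned long)kbt.it.pc);
	}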
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h new file mode 100644 index 000000000000..3dc90fa92c70 --- /dev/null +++ b/arch/tile/include/asm/stat.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/stat.h> | |||
diff --git a/arch/tile/include/asm/statfs.h b/arch/tile/include/asm/statfs.h new file mode 100644 index 000000000000..0b91fe198c20 --- /dev/null +++ b/arch/tile/include/asm/statfs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/statfs.h> | |||
diff --git a/arch/tile/include/asm/string.h b/arch/tile/include/asm/string.h new file mode 100644 index 000000000000..7535cf1a30e4 --- /dev/null +++ b/arch/tile/include/asm/string.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_STRING_H | ||
16 | #define _ASM_TILE_STRING_H | ||
17 | |||
18 | #define __HAVE_ARCH_MEMCHR | ||
19 | #define __HAVE_ARCH_MEMSET | ||
20 | #define __HAVE_ARCH_MEMCPY | ||
21 | #define __HAVE_ARCH_MEMMOVE | ||
22 | #define __HAVE_ARCH_STRCHR | ||
23 | #define __HAVE_ARCH_STRLEN | ||
24 | |||
25 | extern __kernel_size_t strlen(const char *); | ||
26 | extern char *strchr(const char *s, int c); | ||
27 | extern void *memchr(const void *s, int c, size_t n); | ||
28 | extern void *memset(void *, int, __kernel_size_t); | ||
29 | extern void *memcpy(void *, const void *, __kernel_size_t); | ||
30 | extern void *memmove(void *, const void *, __kernel_size_t); | ||
31 | |||
32 | #endif /* _ASM_TILE_STRING_H */ | ||
diff --git a/arch/tile/include/asm/swab.h b/arch/tile/include/asm/swab.h new file mode 100644 index 000000000000..25c686a00f1d --- /dev/null +++ b/arch/tile/include/asm/swab.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SWAB_H | ||
16 | #define _ASM_TILE_SWAB_H | ||
17 | |||
18 | /* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */ | ||
19 | #define __arch_swab32(x) __builtin_bswap32(x) | ||
20 | #define __arch_swab64(x) __builtin_bswap64(x) | ||
21 | |||
22 | /* Use the variant that is natural for the wordsize. */ | ||
23 | #ifdef CONFIG_64BIT | ||
24 | #define __arch_swab16(x) (__builtin_bswap64(x) >> 48) | ||
25 | #else | ||
26 | #define __arch_swab16(x) (__builtin_bswap32(x) >> 16) | ||
27 | #endif | ||
28 | |||
29 | #endif /* _ASM_TILE_SWAB_H */ | ||
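A standalone check of the wordsize trick above: on a 32-bit build, __arch_swab16() byte-swaps via the 32-bit builtin followed by a shift. The gcc builtins are assumed to be available, as in the header:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t x = 0x1234;
		/* Same arithmetic as the CONFIG_64BIT=n case of __arch_swab16(). */
		uint16_t swapped = (uint16_t)(__builtin_bswap32(x) >> 16);

		printf("%#06x -> %#06x\n", x, swapped);   /* 0x1234 -> 0x3412 */
		return 0;
	}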
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h new file mode 100644 index 000000000000..d35e0dcb67b1 --- /dev/null +++ b/arch/tile/include/asm/syscall.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * See asm-generic/syscall.h for descriptions of what we must do here. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_TILE_SYSCALL_H | ||
19 | #define _ASM_TILE_SYSCALL_H | ||
20 | |||
21 | #include <linux/sched.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <arch/abi.h> | ||
24 | |||
25 | /* | ||
26 | * Only the low 32 bits of orig_r0 are meaningful, so we return int. | ||
27 | * This importantly ignores the high bits on 64-bit, so comparisons | ||
28 | * sign-extend the low 32 bits. | ||
29 | */ | ||
30 | static inline int syscall_get_nr(struct task_struct *t, struct pt_regs *regs) | ||
31 | { | ||
32 | return regs->regs[TREG_SYSCALL_NR]; | ||
33 | } | ||
34 | |||
35 | static inline void syscall_rollback(struct task_struct *task, | ||
36 | struct pt_regs *regs) | ||
37 | { | ||
38 | regs->regs[0] = regs->orig_r0; | ||
39 | } | ||
40 | |||
41 | static inline long syscall_get_error(struct task_struct *task, | ||
42 | struct pt_regs *regs) | ||
43 | { | ||
44 | unsigned long error = regs->regs[0]; | ||
45 | return IS_ERR_VALUE(error) ? error : 0; | ||
46 | } | ||
47 | |||
48 | static inline long syscall_get_return_value(struct task_struct *task, | ||
49 | struct pt_regs *regs) | ||
50 | { | ||
51 | return regs->regs[0]; | ||
52 | } | ||
53 | |||
54 | static inline void syscall_set_return_value(struct task_struct *task, | ||
55 | struct pt_regs *regs, | ||
56 | int error, long val) | ||
57 | { | ||
58 | regs->regs[0] = (long) error ?: val; | ||
59 | } | ||
60 | |||
61 | static inline void syscall_get_arguments(struct task_struct *task, | ||
62 | struct pt_regs *regs, | ||
63 | unsigned int i, unsigned int n, | ||
64 | unsigned long *args) | ||
65 | { | ||
66 | BUG_ON(i + n > 6); | ||
67 | memcpy(args, ®s[i], n * sizeof(args[0])); | ||
68 | } | ||
69 | |||
70 | static inline void syscall_set_arguments(struct task_struct *task, | ||
71 | struct pt_regs *regs, | ||
72 | unsigned int i, unsigned int n, | ||
73 | const unsigned long *args) | ||
74 | { | ||
75 | BUG_ON(i + n > 6); | ||
76 | memcpy(®s[i], args, n * sizeof(args[0])); | ||
77 | } | ||
78 | |||
79 | #endif /* _ASM_TILE_SYSCALL_H */ | ||
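A hedged sketch of a caller of the accessors above, e.g. from syscall-entry tracing; the function name and the pr_debug() reporting are illustrative, not part of this patch:

	static void example_trace_entry(struct task_struct *task, struct pt_regs *regs)
	{
		unsigned long args[6];
		int nr = syscall_get_nr(task, regs);

		/* Arguments live in r0-r5, so fetch all six. */
		syscall_get_arguments(task, regs, 0, 6, args);
		pr_debug("syscall %d: args %#lx %#lx ...\n", nr, args[0], args[1]);
	}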
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h new file mode 100644 index 000000000000..e1be54d1a7d8 --- /dev/null +++ b/arch/tile/include/asm/syscalls.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * syscalls.h - Linux syscall interfaces (arch-specific) | ||
3 | * | ||
4 | * Copyright (c) 2008 Jaswinder Singh Rajput | ||
5 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation, version 2. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for | ||
15 | * more details. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_TILE_SYSCALLS_H | ||
19 | #define _ASM_TILE_SYSCALLS_H | ||
20 | |||
21 | #include <linux/compiler.h> | ||
22 | #include <linux/linkage.h> | ||
23 | #include <linux/signal.h> | ||
24 | #include <linux/types.h> | ||
25 | |||
26 | /* kernel/process.c */ | ||
27 | int sys_fork(struct pt_regs *); | ||
28 | int sys_vfork(struct pt_regs *); | ||
29 | int sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
30 | int __user *parent_tidptr, int __user *child_tidptr, | ||
31 | struct pt_regs *); | ||
32 | int sys_execve(char __user *path, char __user *__user *argv, | ||
33 | char __user *__user *envp, struct pt_regs *); | ||
34 | |||
35 | /* kernel/signal.c */ | ||
36 | int sys_sigaltstack(const stack_t __user *, stack_t __user *, | ||
37 | struct pt_regs *); | ||
38 | long sys_rt_sigreturn(struct pt_regs *); | ||
39 | int sys_raise_fpe(int code, unsigned long addr, struct pt_regs*); | ||
40 | |||
41 | /* kernel/sys.c */ | ||
42 | ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count); | ||
43 | long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi, | ||
44 | u32 len, int advice); | ||
45 | int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, | ||
46 | u32 len_lo, u32 len_hi, int advice); | ||
47 | long sys_flush_cache(void); | ||
48 | long sys_mmap(unsigned long addr, unsigned long len, | ||
49 | unsigned long prot, unsigned long flags, | ||
50 | unsigned long fd, unsigned long offset); | ||
51 | long sys_mmap2(unsigned long addr, unsigned long len, | ||
52 | unsigned long prot, unsigned long flags, | ||
53 | unsigned long fd, unsigned long offset); | ||
54 | |||
55 | #ifndef __tilegx__ | ||
56 | /* mm/fault.c */ | ||
57 | int sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *); | ||
58 | #endif | ||
59 | |||
60 | #endif /* _ASM_TILE_SYSCALLS_H */ | ||
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h new file mode 100644 index 000000000000..d6ca7f816c87 --- /dev/null +++ b/arch/tile/include/asm/system.h | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SYSTEM_H | ||
16 | #define _ASM_TILE_SYSTEM_H | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <linux/irqflags.h> | ||
22 | |||
23 | /* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ | ||
24 | #include <asm/ptrace.h> | ||
25 | |||
26 | #include <arch/chip.h> | ||
27 | #include <arch/sim_def.h> | ||
28 | #include <arch/spr_def.h> | ||
29 | |||
30 | /* | ||
31 | * read_barrier_depends - Flush all pending reads that subsequent reads | ||
32 | * depend on. | ||
33 | * | ||
34 | * No data-dependent reads from memory-like regions are ever reordered | ||
35 | * over this barrier. All reads preceding this primitive are guaranteed | ||
36 | * to access memory (but not necessarily other CPUs' caches) before any | ||
37 | * reads following this primitive that depend on the data returned by | ||
38 | * any of the preceding reads. This primitive is much lighter weight than | ||
39 | * rmb() on most CPUs, and is never heavier weight than is | ||
40 | * rmb(). | ||
41 | * | ||
42 | * These ordering constraints are respected by both the local CPU | ||
43 | * and the compiler. | ||
44 | * | ||
45 | * Ordering is not guaranteed by anything other than these primitives, | ||
46 | * not even by data dependencies. See the documentation for | ||
47 | * memory_barrier() for examples and URLs to more information. | ||
48 | * | ||
49 | * For example, the following code would force ordering (the initial | ||
50 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
51 | * | ||
52 | * <programlisting> | ||
53 | * CPU 0 CPU 1 | ||
54 | * | ||
55 | * b = 2; | ||
56 | * memory_barrier(); | ||
57 | * p = &b; q = p; | ||
58 | * read_barrier_depends(); | ||
59 | * d = *q; | ||
60 | * </programlisting> | ||
61 | * | ||
62 | * because the read of "*q" depends on the read of "p" and these | ||
63 | * two reads are separated by a read_barrier_depends(). However, | ||
64 | * the following code, with the same initial values for "a" and "b": | ||
65 | * | ||
66 | * <programlisting> | ||
67 | * CPU 0 CPU 1 | ||
68 | * | ||
69 | * a = 2; | ||
70 | * memory_barrier(); | ||
71 | * b = 3; y = b; | ||
72 | * read_barrier_depends(); | ||
73 | * x = a; | ||
74 | * </programlisting> | ||
75 | * | ||
76 | * does not enforce ordering, since there is no data dependency between | ||
77 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
78 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
79 | * in cases like this where there are no data dependencies. | ||
80 | */ | ||
81 | |||
82 | #define read_barrier_depends() do { } while (0) | ||
83 | |||
84 | #define __sync() __insn_mf() | ||
85 | |||
86 | #if CHIP_HAS_SPLIT_CYCLE() | ||
87 | #define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW) | ||
88 | #else | ||
89 | #define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */ | ||
90 | #endif | ||
91 | |||
92 | /* Fence to guarantee visibility of stores to incoherent memory. */ | ||
93 | static inline void | ||
94 | mb_incoherent(void) | ||
95 | { | ||
96 | __insn_mf(); | ||
97 | |||
98 | #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() | ||
99 | { | ||
100 | int __mb_incoherent(void); | ||
101 | #if CHIP_HAS_TILE_WRITE_PENDING() | ||
102 | const unsigned long WRITE_TIMEOUT_CYCLES = 400; | ||
103 | unsigned long start = get_cycles_low(); | ||
104 | do { | ||
105 | if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0) | ||
106 | return; | ||
107 | } while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES); | ||
108 | #endif /* CHIP_HAS_TILE_WRITE_PENDING() */ | ||
109 | (void) __mb_incoherent(); | ||
110 | } | ||
111 | #endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */ | ||
112 | } | ||
113 | |||
114 | #define fast_wmb() __sync() | ||
115 | #define fast_rmb() __sync() | ||
116 | #define fast_mb() __sync() | ||
117 | #define fast_iob() mb_incoherent() | ||
118 | |||
119 | #define wmb() fast_wmb() | ||
120 | #define rmb() fast_rmb() | ||
121 | #define mb() fast_mb() | ||
122 | #define iob() fast_iob() | ||
123 | |||
124 | #ifdef CONFIG_SMP | ||
125 | #define smp_mb() mb() | ||
126 | #define smp_rmb() rmb() | ||
127 | #define smp_wmb() wmb() | ||
128 | #define smp_read_barrier_depends() read_barrier_depends() | ||
129 | #else | ||
130 | #define smp_mb() barrier() | ||
131 | #define smp_rmb() barrier() | ||
132 | #define smp_wmb() barrier() | ||
133 | #define smp_read_barrier_depends() do { } while (0) | ||
134 | #endif | ||
135 | |||
136 | #define set_mb(var, value) \ | ||
137 | do { var = value; mb(); } while (0) | ||
138 | |||
139 | #include <linux/irqflags.h> | ||
140 | |||
141 | /* | ||
142 | * Pause the DMA engine and static network before task switching. | ||
143 | */ | ||
144 | #define prepare_arch_switch(next) _prepare_arch_switch(next) | ||
145 | void _prepare_arch_switch(struct task_struct *next); | ||
146 | |||
147 | |||
148 | /* | ||
149 | * switch_to(n) should switch tasks to task nr n, first | ||
150 | * checking that n isn't the current task, in which case it does nothing. | ||
151 | * The number of callee-saved registers saved on the kernel stack | ||
152 | * is defined here for use in copy_thread() and must agree with __switch_to(). | ||
153 | */ | ||
154 | #endif /* !__ASSEMBLY__ */ | ||
155 | #define CALLEE_SAVED_FIRST_REG 30 | ||
156 | #define CALLEE_SAVED_REGS_COUNT 24 /* r30 to r52, plus an empty to align */ | ||
157 | #ifndef __ASSEMBLY__ | ||
158 | struct task_struct; | ||
159 | #define switch_to(prev, next, last) ((last) = _switch_to((prev), (next))) | ||
160 | extern struct task_struct *_switch_to(struct task_struct *prev, | ||
161 | struct task_struct *next); | ||
162 | |||
163 | /* | ||
164 | * On SMP systems, when the scheduler does migration-cost autodetection, | ||
165 | * it needs a way to flush as much of the CPU's caches as possible: | ||
166 | * | ||
167 | * TODO: fill this in! | ||
168 | */ | ||
169 | static inline void sched_cacheflush(void) | ||
170 | { | ||
171 | } | ||
172 | |||
173 | #define arch_align_stack(x) (x) | ||
174 | |||
175 | /* | ||
176 | * Is the kernel doing fixups of unaligned accesses? If <0, no kernel | ||
177 | * intervention occurs and SIGBUS is delivered with no data address | ||
178 | * info. If 0, the kernel single-steps the instruction to discover | ||
179 | * the data address to provide with the SIGBUS. If 1, the kernel does | ||
180 | * a fixup. | ||
181 | */ | ||
182 | extern int unaligned_fixup; | ||
183 | |||
184 | /* Is the kernel printing on each unaligned fixup? */ | ||
185 | extern int unaligned_printk; | ||
186 | |||
187 | /* Number of unaligned fixups performed */ | ||
188 | extern unsigned int unaligned_fixup_count; | ||
189 | |||
190 | /* User-level DMA management functions */ | ||
191 | void grant_dma_mpls(void); | ||
192 | void restrict_dma_mpls(void); | ||
193 | |||
194 | |||
195 | /* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */ | ||
196 | extern int _sim_syscall(int syscall_num, ...); | ||
197 | #define sim_syscall(syscall_num, ...) \ | ||
198 | _sim_syscall(SIM_CONTROL_SYSCALL + \ | ||
199 | ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \ | ||
200 | ## __VA_ARGS__) | ||
201 | |||
202 | /* | ||
203 | * Kernel threads can check to see if they need to migrate their | ||
204 | * stack whenever they return from a context switch; for user | ||
205 | * threads, we defer until they are returning to user-space. | ||
206 | */ | ||
207 | #define finish_arch_switch(prev) do { \ | ||
208 | if (unlikely((prev)->state == TASK_DEAD)) \ | ||
209 | __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \ | ||
210 | ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \ | ||
211 | __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \ | ||
212 | (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \ | ||
213 | if (current->mm == NULL && !kstack_hash && \ | ||
214 | current_thread_info()->homecache_cpu != smp_processor_id()) \ | ||
215 | homecache_migrate_kthread(); \ | ||
216 | } while (0) | ||
217 | |||
218 | #endif /* !__ASSEMBLY__ */ | ||
219 | |||
220 | #endif /* _ASM_TILE_SYSTEM_H */ | ||
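A hedged sketch of the producer/consumer pairing that the barrier comments above describe: the writer publishes data before the flag with wmb(), and the reader orders the flag check before the data read with rmb(). The variables and functions are illustrative only:

	static int example_data;
	static int example_ready;

	static void example_producer(void)
	{
		example_data = 42;
		wmb();			/* make the data visible before the flag */
		example_ready = 1;
	}

	static int example_consumer(void)
	{
		if (example_ready) {
			rmb();		/* order the flag read before the data read */
			return example_data;
		}
		return -1;
	}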
diff --git a/arch/tile/include/asm/termbits.h b/arch/tile/include/asm/termbits.h new file mode 100644 index 000000000000..3935b106de79 --- /dev/null +++ b/arch/tile/include/asm/termbits.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/termbits.h> | |||
diff --git a/arch/tile/include/asm/termios.h b/arch/tile/include/asm/termios.h new file mode 100644 index 000000000000..280d78a9d966 --- /dev/null +++ b/arch/tile/include/asm/termios.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/termios.h> | |||
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h new file mode 100644 index 000000000000..9024bf3530aa --- /dev/null +++ b/arch/tile/include/asm/thread_info.h | |||
@@ -0,0 +1,165 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_THREAD_INFO_H | ||
17 | #define _ASM_TILE_THREAD_INFO_H | ||
18 | |||
19 | #include <asm/processor.h> | ||
20 | #include <asm/page.h> | ||
21 | #ifndef __ASSEMBLY__ | ||
22 | |||
23 | /* | ||
24 | * Low level task data that assembly code needs immediate access to. | ||
25 | * The structure is placed at the bottom of the supervisor stack. | ||
26 | */ | ||
27 | struct thread_info { | ||
28 | struct task_struct *task; /* main task structure */ | ||
29 | struct exec_domain *exec_domain; /* execution domain */ | ||
30 | unsigned long flags; /* low level flags */ | ||
31 | unsigned long status; /* thread-synchronous flags */ | ||
32 | __u32 homecache_cpu; /* CPU we are homecached on */ | ||
33 | __u32 cpu; /* current CPU */ | ||
34 | int preempt_count; /* 0 => preemptable, | ||
35 | <0 => BUG */ | ||
36 | |||
37 | mm_segment_t addr_limit; /* thread address space | ||
38 | (KERNEL_DS or USER_DS) */ | ||
39 | struct restart_block restart_block; | ||
40 | struct single_step_state *step_state; /* single step state | ||
41 | (if non-zero) */ | ||
42 | }; | ||
43 | |||
44 | /* | ||
45 | * macros/functions for gaining access to the thread information structure. | ||
46 | */ | ||
47 | #define INIT_THREAD_INFO(tsk) \ | ||
48 | { \ | ||
49 | .task = &tsk, \ | ||
50 | .exec_domain = &default_exec_domain, \ | ||
51 | .flags = 0, \ | ||
52 | .cpu = 0, \ | ||
53 | .preempt_count = INIT_PREEMPT_COUNT, \ | ||
54 | .addr_limit = KERNEL_DS, \ | ||
55 | .restart_block = { \ | ||
56 | .fn = do_no_restart_syscall, \ | ||
57 | }, \ | ||
58 | .step_state = 0, \ | ||
59 | } | ||
60 | |||
61 | #define init_thread_info (init_thread_union.thread_info) | ||
62 | #define init_stack (init_thread_union.stack) | ||
63 | |||
64 | #endif /* !__ASSEMBLY__ */ | ||
65 | |||
66 | #if PAGE_SIZE < 8192 | ||
67 | #define THREAD_SIZE_ORDER (13 - PAGE_SHIFT) | ||
68 | #else | ||
69 | #define THREAD_SIZE_ORDER (0) | ||
70 | #endif | ||
71 | |||
72 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) | ||
73 | #define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER) | ||
74 | |||
75 | #define STACK_WARN (THREAD_SIZE/8) | ||
76 | |||
77 | #ifndef __ASSEMBLY__ | ||
78 | |||
79 | /* How to get the thread information struct from C. */ | ||
80 | register unsigned long stack_pointer __asm__("sp"); | ||
81 | |||
82 | #define current_thread_info() \ | ||
83 | ((struct thread_info *)(stack_pointer & -THREAD_SIZE)) | ||
84 | |||
85 | #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR | ||
86 | extern struct thread_info *alloc_thread_info(struct task_struct *task); | ||
87 | extern void free_thread_info(struct thread_info *info); | ||
88 | |||
89 | /* Switch boot idle thread to a freshly-allocated stack and free old stack. */ | ||
90 | extern void cpu_idle_on_new_stack(struct thread_info *old_ti, | ||
91 | unsigned long new_sp, | ||
92 | unsigned long new_ss10); | ||
93 | |||
94 | #else /* __ASSEMBLY__ */ | ||
95 | |||
96 | /* how to get the thread information struct from ASM */ | ||
97 | #ifdef __tilegx__ | ||
98 | #define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63 | ||
99 | #else | ||
100 | #define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31 | ||
101 | #endif | ||
102 | |||
103 | #endif /* !__ASSEMBLY__ */ | ||
104 | |||
105 | #define PREEMPT_ACTIVE 0x10000000 | ||
106 | |||
107 | /* | ||
108 | * Thread information flags that various assembly files may need to access. | ||
109 | * Keep flags accessed frequently in low bits, particular since it makes | ||
110 | * it easier to build constants in assembly. | ||
111 | */ | ||
112 | #define TIF_SIGPENDING 0 /* signal pending */ | ||
113 | #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ | ||
114 | #define TIF_SINGLESTEP 2 /* restore singlestep on return to | ||
115 | user mode */ | ||
116 | #define TIF_ASYNC_TLB 3 /* got an async TLB fault in kernel */ | ||
117 | #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ | ||
118 | #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ | ||
119 | #define TIF_SECCOMP 6 /* secure computing */ | ||
120 | #define TIF_MEMDIE 7 /* OOM killer at work */ | ||
121 | |||
122 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | ||
123 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | ||
124 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | ||
125 | #define _TIF_ASYNC_TLB (1<<TIF_ASYNC_TLB) | ||
126 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | ||
127 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | ||
128 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | ||
129 | #define _TIF_MEMDIE (1<<TIF_MEMDIE) | ||
130 | |||
131 | /* Work to do on any return to user space. */ | ||
132 | #define _TIF_ALLWORK_MASK \ | ||
133 | (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|_TIF_ASYNC_TLB) | ||
134 | |||
135 | /* | ||
136 | * Thread-synchronous status. | ||
137 | * | ||
138 | * This is different from the flags in that nobody else | ||
139 | * ever touches our thread-synchronous status, so we don't | ||
140 | * have to worry about atomic accesses. | ||
141 | */ | ||
142 | #ifdef __tilegx__ | ||
143 | #define TS_COMPAT 0x0001 /* 32-bit compatibility mode */ | ||
144 | #endif | ||
145 | #define TS_POLLING 0x0004 /* in idle loop but not sleeping */ | ||
146 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ | ||
147 | #define TS_EXEC_HASH_SET 0x0010 /* apply TS_EXEC_HASH_xxx flags */ | ||
148 | #define TS_EXEC_HASH_RO 0x0020 /* during exec, hash r/o segments */ | ||
149 | #define TS_EXEC_HASH_RW 0x0040 /* during exec, hash r/w segments */ | ||
150 | #define TS_EXEC_HASH_STACK 0x0080 /* during exec, hash the stack */ | ||
151 | #define TS_EXEC_HASH_FLAGS 0x00f0 /* mask for TS_EXEC_HASH_xxx flags */ | ||
152 | |||
153 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | ||
154 | |||
155 | #ifndef __ASSEMBLY__ | ||
156 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
157 | static inline void set_restore_sigmask(void) | ||
158 | { | ||
159 | struct thread_info *ti = current_thread_info(); | ||
160 | ti->status |= TS_RESTORE_SIGMASK; | ||
161 | set_bit(TIF_SIGPENDING, &ti->flags); | ||
162 | } | ||
163 | #endif /* !__ASSEMBLY__ */ | ||
164 | |||
165 | #endif /* _ASM_TILE_THREAD_INFO_H */ | ||
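A standalone sketch of the current_thread_info() computation above: with THREAD_SIZE = 8 KiB (the PAGE_SIZE < 8192 case) the kernel stack is 8 KiB-aligned, so masking the stack pointer recovers the thread_info at its base. The sample stack pointer value is made up:

	#include <stdio.h>
	#include <stdint.h>

	#define THREAD_SIZE 8192UL

	int main(void)
	{
		uintptr_t sp = 0xc0003f40;               /* hypothetical stack pointer */
		uintptr_t ti = sp & ~(THREAD_SIZE - 1);  /* same as sp & -THREAD_SIZE */

		printf("sp %#lx -> thread_info %#lx\n",
		       (unsigned long)sp, (unsigned long)ti);
		return 0;
	}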
diff --git a/arch/tile/include/asm/timex.h b/arch/tile/include/asm/timex.h new file mode 100644 index 000000000000..3baf5fc4c0a1 --- /dev/null +++ b/arch/tile/include/asm/timex.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TIMEX_H | ||
16 | #define _ASM_TILE_TIMEX_H | ||
17 | |||
18 | /* | ||
19 | * This rate should be a multiple of the possible HZ values (100, 250, 1000) | ||
20 | * and a fraction of the possible hardware timer frequencies. Our timer | ||
21 | * frequency is highly tunable but also quite precise, so for the primary use | ||
22 | * of this value (setting ACT_HZ from HZ) we just pick a value that causes | ||
23 | * ACT_HZ to be set to HZ. We make the value somewhat large just to be | ||
24 | * more robust in case someone tries out a new value of HZ. | ||
25 | */ | ||
26 | #define CLOCK_TICK_RATE 1000000 | ||
27 | |||
28 | typedef unsigned long long cycles_t; | ||
29 | |||
30 | #if CHIP_HAS_SPLIT_CYCLE() | ||
31 | cycles_t get_cycles(void); | ||
32 | #else | ||
33 | static inline cycles_t get_cycles(void) | ||
34 | { | ||
35 | return __insn_mfspr(SPR_CYCLE); | ||
36 | } | ||
37 | #endif | ||
38 | |||
39 | cycles_t get_clock_rate(void); | ||
40 | |||
41 | /* Called at cpu initialization to set some low-level constants. */ | ||
42 | void setup_clock(void); | ||
43 | |||
44 | /* Called at cpu initialization to start the tile-timer clock device. */ | ||
45 | void setup_tile_timer(void); | ||
46 | |||
47 | #endif /* _ASM_TILE_TIMEX_H */ | ||
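A hedged sketch of timing a code region with the interfaces declared above; the function name and message are illustrative only:

	static void example_time_region(void)
	{
		cycles_t start, elapsed;

		start = get_cycles();
		/* ... work being measured ... */
		elapsed = get_cycles() - start;

		pr_info("region took %llu cycles at %llu Hz\n",
			elapsed, (unsigned long long)get_clock_rate());
	}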
diff --git a/arch/tile/include/asm/tlb.h b/arch/tile/include/asm/tlb.h new file mode 100644 index 000000000000..4a891a1a8df3 --- /dev/null +++ b/arch/tile/include/asm/tlb.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TLB_H | ||
16 | #define _ASM_TILE_TLB_H | ||
17 | |||
18 | #define tlb_start_vma(tlb, vma) do { } while (0) | ||
19 | #define tlb_end_vma(tlb, vma) do { } while (0) | ||
20 | #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) | ||
21 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | ||
22 | |||
23 | #include <asm-generic/tlb.h> | ||
24 | |||
25 | #endif /* _ASM_TILE_TLB_H */ | ||
diff --git a/arch/tile/include/asm/tlbflush.h b/arch/tile/include/asm/tlbflush.h new file mode 100644 index 000000000000..96199d214fb8 --- /dev/null +++ b/arch/tile/include/asm/tlbflush.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TLBFLUSH_H | ||
16 | #define _ASM_TILE_TLBFLUSH_H | ||
17 | |||
18 | #include <linux/mm.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <asm/cacheflush.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <hv/hypervisor.h> | ||
24 | |||
25 | /* | ||
26 | * Rather than associating each mm with its own ASID, we just use | ||
27 | * ASIDs to allow us to lazily flush the TLB when we switch mms. | ||
28 | * This way we only have to do an actual TLB flush on mm switch | ||
29 | * every time we wrap ASIDs, not every single time we switch. | ||
30 | * | ||
31 | * FIXME: We might improve performance by keeping ASIDs around | ||
32 | * properly, though since the hypervisor direct-maps VAs to TSB | ||
33 | * entries, we're likely to have lost at least the executable page | ||
34 | * mappings by the time we switch back to the original mm. | ||
35 | */ | ||
36 | DECLARE_PER_CPU(int, current_asid); | ||
37 | |||
38 | /* The hypervisor tells us what ASIDs are available to us. */ | ||
39 | extern int min_asid, max_asid; | ||
40 | |||
41 | static inline unsigned long hv_page_size(const struct vm_area_struct *vma) | ||
42 | { | ||
43 | return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE; | ||
44 | } | ||
45 | |||
46 | /* Pass as vma pointer for non-executable mapping, if no vma available. */ | ||
47 | #define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL) | ||
48 | |||
49 | /* Flush a single user page on this cpu. */ | ||
50 | static inline void local_flush_tlb_page(const struct vm_area_struct *vma, | ||
51 | unsigned long addr, | ||
52 | unsigned long page_size) | ||
53 | { | ||
54 | int rc = hv_flush_page(addr, page_size); | ||
55 | if (rc < 0) | ||
56 | panic("hv_flush_page(%#lx,%#lx) failed: %d", | ||
57 | addr, page_size, rc); | ||
58 | if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) | ||
59 | __flush_icache(); | ||
60 | } | ||
61 | |||
62 | /* Flush range of user pages on this cpu. */ | ||
63 | static inline void local_flush_tlb_pages(const struct vm_area_struct *vma, | ||
64 | unsigned long addr, | ||
65 | unsigned long page_size, | ||
66 | unsigned long len) | ||
67 | { | ||
68 | int rc = hv_flush_pages(addr, page_size, len); | ||
69 | if (rc < 0) | ||
70 | panic("hv_flush_pages(%#lx,%#lx,%#lx) failed: %d", | ||
71 | addr, page_size, len, rc); | ||
72 | if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) | ||
73 | __flush_icache(); | ||
74 | } | ||
75 | |||
76 | /* Flush all user pages on this cpu. */ | ||
77 | static inline void local_flush_tlb(void) | ||
78 | { | ||
79 | int rc = hv_flush_all(1); /* preserve global mappings */ | ||
80 | if (rc < 0) | ||
81 | panic("hv_flush_all(1) failed: %d", rc); | ||
82 | __flush_icache(); | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * Global pages have to be flushed a bit differently. Not a real | ||
87 | * performance problem because this does not happen often. | ||
88 | */ | ||
89 | static inline void local_flush_tlb_all(void) | ||
90 | { | ||
91 | int i; | ||
92 | for (i = 0; ; ++i) { | ||
93 | HV_VirtAddrRange r = hv_inquire_virtual(i); | ||
94 | if (r.size == 0) | ||
95 | break; | ||
96 | local_flush_tlb_pages(NULL, r.start, PAGE_SIZE, r.size); | ||
97 | local_flush_tlb_pages(NULL, r.start, HPAGE_SIZE, r.size); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * TLB flushing: | ||
103 | * | ||
104 | * - flush_tlb() flushes the current mm struct TLBs | ||
105 | * - flush_tlb_all() flushes all processes TLBs | ||
106 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | ||
107 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
108 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
109 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
110 | * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus | ||
111 | * | ||
112 | * Here (as in vm_area_struct), "end" means the first byte after | ||
113 | * our end address. | ||
114 | */ | ||
115 | |||
116 | extern void flush_tlb_all(void); | ||
117 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | ||
118 | extern void flush_tlb_current_task(void); | ||
119 | extern void flush_tlb_mm(struct mm_struct *); | ||
120 | extern void flush_tlb_page(const struct vm_area_struct *, unsigned long); | ||
121 | extern void flush_tlb_page_mm(const struct vm_area_struct *, | ||
122 | struct mm_struct *, unsigned long); | ||
123 | extern void flush_tlb_range(const struct vm_area_struct *, | ||
124 | unsigned long start, unsigned long end); | ||
125 | |||
126 | #define flush_tlb() flush_tlb_current_task() | ||
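A minimal usage sketch (illustrative only, not part of this patch; the function name is made up): a caller that has just rewritten PTEs for part of a VMA drops the stale translations with flush_tlb_range(), where "end" is the first byte past the range.

/* Illustrative only: invalidate translations for [start, end) in 'vma'. */
static void example_ptes_updated(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	flush_tlb_range(vma, start, end);
}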
127 | |||
128 | #endif /* _ASM_TILE_TLBFLUSH_H */ | ||
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h new file mode 100644 index 000000000000..343172d422a9 --- /dev/null +++ b/arch/tile/include/asm/topology.h | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TOPOLOGY_H | ||
16 | #define _ASM_TILE_TOPOLOGY_H | ||
17 | |||
18 | #ifdef CONFIG_NUMA | ||
19 | |||
20 | #include <linux/cpumask.h> | ||
21 | |||
22 | /* Mappings between logical cpu number and node number. */ | ||
23 | extern struct cpumask node_2_cpu_mask[]; | ||
24 | extern char cpu_2_node[]; | ||
25 | |||
26 | /* Returns the number of the node containing CPU 'cpu'. */ | ||
27 | static inline int cpu_to_node(int cpu) | ||
28 | { | ||
29 | return cpu_2_node[cpu]; | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * Returns the number of the node containing Node 'node'. | ||
34 | * This architecture is flat, so it is a pretty simple function! | ||
35 | */ | ||
36 | #define parent_node(node) (node) | ||
37 | |||
38 | /* Returns a bitmask of CPUs on Node 'node'. */ | ||
39 | static inline const struct cpumask *cpumask_of_node(int node) | ||
40 | { | ||
41 | return &node_2_cpu_mask[node]; | ||
42 | } | ||
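A minimal usage sketch (illustrative only, not part of this patch): counting the online cpus that belong to a given node via the node_2_cpu_mask mapping exposed above.

/* Illustrative only: how many online cpus live on node 'nid'? */
static int example_cpus_on_node(int nid)
{
	int cpu, count = 0;

	for_each_cpu_and(cpu, cpumask_of_node(nid), cpu_online_mask)
		count++;
	return count;
}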
43 | |||
44 | /* For now, use numa node -1 for global allocation. */ | ||
45 | #define pcibus_to_node(bus) ((void)(bus), -1) | ||
46 | |||
47 | /* sched_domains SD_NODE_INIT for TILE architecture */ | ||
48 | #define SD_NODE_INIT (struct sched_domain) { \ | ||
49 | .min_interval = 8, \ | ||
50 | .max_interval = 32, \ | ||
51 | .busy_factor = 32, \ | ||
52 | .imbalance_pct = 125, \ | ||
53 | .cache_nice_tries = 1, \ | ||
54 | .busy_idx = 3, \ | ||
55 | .idle_idx = 1, \ | ||
56 | .newidle_idx = 2, \ | ||
57 | .wake_idx = 1, \ | ||
58 | .flags = SD_LOAD_BALANCE \ | ||
59 | | SD_BALANCE_NEWIDLE \ | ||
60 | | SD_BALANCE_EXEC \ | ||
61 | | SD_BALANCE_FORK \ | ||
62 | | SD_WAKE_AFFINE \ | ||
63 | | SD_SERIALIZE, \ | ||
64 | .last_balance = jiffies, \ | ||
65 | .balance_interval = 1, \ | ||
66 | } | ||
67 | |||
68 | /* By definition, we create nodes based on online memory. */ | ||
69 | #define node_has_online_mem(nid) 1 | ||
70 | |||
71 | #endif /* CONFIG_NUMA */ | ||
72 | |||
73 | #include <asm-generic/topology.h> | ||
74 | |||
75 | #ifdef CONFIG_SMP | ||
76 | #define topology_physical_package_id(cpu) ((void)(cpu), 0) | ||
77 | #define topology_core_id(cpu) (cpu) | ||
78 | #define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask) | ||
79 | #define topology_thread_cpumask(cpu) cpumask_of(cpu) | ||
80 | |||
81 | /* indicates that pointers to the topology struct cpumask maps are valid */ | ||
82 | #define arch_provides_topology_pointers yes | ||
83 | #endif | ||
84 | |||
85 | #endif /* _ASM_TILE_TOPOLOGY_H */ | ||
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h new file mode 100644 index 000000000000..eab33d4a917d --- /dev/null +++ b/arch/tile/include/asm/traps.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TRAPS_H | ||
16 | #define _ASM_TILE_TRAPS_H | ||
17 | |||
18 | /* mm/fault.c */ | ||
19 | void do_page_fault(struct pt_regs *, int fault_num, | ||
20 | unsigned long address, unsigned long write); | ||
21 | |||
22 | /* kernel/traps.c */ | ||
23 | void do_trap(struct pt_regs *, int fault_num, unsigned long reason); | ||
24 | |||
25 | /* kernel/time.c */ | ||
26 | void do_timer_interrupt(struct pt_regs *, int fault_num); | ||
27 | |||
28 | /* kernel/messaging.c */ | ||
29 | void hv_message_intr(struct pt_regs *, int intnum); | ||
30 | |||
31 | /* kernel/irq.c */ | ||
32 | void tile_dev_intr(struct pt_regs *, int intnum); | ||
33 | |||
34 | |||
35 | |||
36 | #endif /* _ASM_TILE_TRAPS_H */ | ||
diff --git a/arch/tile/include/asm/types.h b/arch/tile/include/asm/types.h new file mode 100644 index 000000000000..b9e79bc580dd --- /dev/null +++ b/arch/tile/include/asm/types.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/types.h> | |||
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h new file mode 100644 index 000000000000..f3058afd5a88 --- /dev/null +++ b/arch/tile/include/asm/uaccess.h | |||
@@ -0,0 +1,578 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_UACCESS_H | ||
16 | #define _ASM_TILE_UACCESS_H | ||
17 | |||
18 | /* | ||
19 | * User space memory access functions | ||
20 | */ | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <asm-generic/uaccess-unaligned.h> | ||
24 | #include <asm/processor.h> | ||
25 | #include <asm/page.h> | ||
26 | |||
27 | #define VERIFY_READ 0 | ||
28 | #define VERIFY_WRITE 1 | ||
29 | |||
30 | /* | ||
31 | * The fs value determines whether argument validity checking should be | ||
32 | * performed or not. If get_fs() == USER_DS, checking is performed; with | ||
33 | * get_fs() == KERNEL_DS, checking is bypassed. | ||
34 | * | ||
35 | * For historical reasons, these macros are grossly misnamed. | ||
36 | */ | ||
37 | #define MAKE_MM_SEG(a) ((mm_segment_t) { (a) }) | ||
38 | |||
39 | #define KERNEL_DS MAKE_MM_SEG(-1UL) | ||
40 | #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) | ||
41 | |||
42 | #define get_ds() (KERNEL_DS) | ||
43 | #define get_fs() (current_thread_info()->addr_limit) | ||
44 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | ||
45 | |||
46 | #define segment_eq(a, b) ((a).seg == (b).seg) | ||
47 | |||
48 | #ifndef __tilegx__ | ||
49 | /* | ||
50 | * We could allow mapping all 16 MB at 0xfc000000, but we set up a | ||
51 | * special hack in arch_setup_additional_pages() to auto-create a mapping | ||
52 | * for the first 16 KB, and it would seem strange to have different | ||
53 | * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000. | ||
54 | */ | ||
55 | static inline int is_arch_mappable_range(unsigned long addr, | ||
56 | unsigned long size) | ||
57 | { | ||
58 | return (addr >= MEM_USER_INTRPT && | ||
59 | addr < (MEM_USER_INTRPT + INTRPT_SIZE) && | ||
60 | size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr); | ||
61 | } | ||
62 | #define is_arch_mappable_range is_arch_mappable_range | ||
63 | #else | ||
64 | #define is_arch_mappable_range(addr, size) 0 | ||
65 | #endif | ||
66 | |||
67 | /* | ||
68 | * Test whether a block of memory is a valid user space address. | ||
69 | * Returns 0 if the range is valid, nonzero otherwise. | ||
70 | */ | ||
71 | int __range_ok(unsigned long addr, unsigned long size); | ||
72 | |||
73 | /** | ||
74 | * access_ok: - Checks if a user space pointer is valid | ||
75 | * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that | ||
76 | * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe | ||
77 | * to write to a block, it is always safe to read from it. | ||
78 | * @addr: User space pointer to start of block to check | ||
79 | * @size: Size of block to check | ||
80 | * | ||
81 | * Context: User context only. This function may sleep. | ||
82 | * | ||
83 | * Checks if a pointer to a block of memory in user space is valid. | ||
84 | * | ||
85 | * Returns true (nonzero) if the memory block may be valid, false (zero) | ||
86 | * if it is definitely invalid. | ||
87 | * | ||
88 | * Note that, depending on architecture, this function probably just | ||
89 | * checks that the pointer is in the user space range - after calling | ||
90 | * this function, memory access functions may still return -EFAULT. | ||
91 | */ | ||
92 | #define access_ok(type, addr, size) \ | ||
93 | (likely(__range_ok((unsigned long)addr, size) == 0)) | ||
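A minimal usage sketch (illustrative only, not part of this patch; the function name is made up): validate a user pointer once with access_ok(), then use the cheaper double-underscore accessor on it.

/* Illustrative only: read one int from a pre-validated user pointer. */
static int example_read_word(const int __user *uptr, int *out)
{
	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
		return -EFAULT;
	return __get_user(*out, uptr);
}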
94 | |||
95 | /* | ||
96 | * The exception table consists of pairs of addresses: the first is the | ||
97 | * address of an instruction that is allowed to fault, and the second is | ||
98 | * the address at which the program should continue. No registers are | ||
99 | * modified, so it is entirely up to the continuation code to figure out | ||
100 | * what to do. | ||
101 | * | ||
102 | * All the routines below use bits of fixup code that are out of line | ||
103 | * with the main instruction path. This means when everything is well, | ||
104 | * we don't even have to jump over them. Further, they do not intrude | ||
105 | * on our cache or tlb entries. | ||
106 | */ | ||
107 | |||
108 | struct exception_table_entry { | ||
109 | unsigned long insn, fixup; | ||
110 | }; | ||
111 | |||
112 | extern int fixup_exception(struct pt_regs *regs); | ||
113 | |||
114 | /* | ||
115 | * We return the __get_user_N function results in a structure, | ||
116 | * thus in r0 and r1. If "err" is zero, "val" is the result | ||
117 | * of the read; otherwise, "err" is -EFAULT. | ||
118 | * | ||
119 | * We rarely need 8-byte values on a 32-bit architecture, but | ||
120 | * we size the structure to accommodate. In practice, for the | ||
121 | * smaller reads, we can zero the high word for free, and | ||
122 | * the caller will ignore it by virtue of casting anyway. | ||
123 | */ | ||
124 | struct __get_user { | ||
125 | unsigned long long val; | ||
126 | int err; | ||
127 | }; | ||
128 | |||
129 | /* | ||
130 | * FIXME: we should express these as inline extended assembler, since | ||
131 | * they're fundamentally just a variable dereference and some | ||
132 | * supporting exception_table gunk. Note that (a la i386) we can | ||
133 | * extend the copy_to_user and copy_from_user routines to call into | ||
134 | * such extended assembler routines, though we will have to use a | ||
135 | * different return code in that case (1, 2, or 4, rather than -EFAULT). | ||
136 | */ | ||
137 | extern struct __get_user __get_user_1(const void *); | ||
138 | extern struct __get_user __get_user_2(const void *); | ||
139 | extern struct __get_user __get_user_4(const void *); | ||
140 | extern struct __get_user __get_user_8(const void *); | ||
141 | extern int __put_user_1(long, void *); | ||
142 | extern int __put_user_2(long, void *); | ||
143 | extern int __put_user_4(long, void *); | ||
144 | extern int __put_user_8(long long, void *); | ||
145 | |||
146 | /* Unimplemented routines to cause linker failures */ | ||
147 | extern struct __get_user __get_user_bad(void); | ||
148 | extern int __put_user_bad(void); | ||
149 | |||
150 | /* | ||
151 | * Careful: we have to cast the result to the type of the pointer | ||
152 | * for sign reasons. | ||
153 | */ | ||
154 | /** | ||
155 | * __get_user: - Get a simple variable from user space, with less checking. | ||
156 | * @x: Variable to store result. | ||
157 | * @ptr: Source address, in user space. | ||
158 | * | ||
159 | * Context: User context only. This function may sleep. | ||
160 | * | ||
161 | * This macro copies a single simple variable from user space to kernel | ||
162 | * space. It supports simple types like char and int, but not larger | ||
163 | * data types like structures or arrays. | ||
164 | * | ||
165 | * @ptr must have pointer-to-simple-variable type, and the result of | ||
166 | * dereferencing @ptr must be assignable to @x without a cast. | ||
167 | * | ||
168 | * Returns zero on success, or -EFAULT on error. | ||
169 | * On error, the variable @x is set to zero. | ||
170 | * | ||
171 | * Caller must check the pointer with access_ok() before calling this | ||
172 | * function. | ||
173 | */ | ||
174 | #define __get_user(x, ptr) \ | ||
175 | ({ struct __get_user __ret; \ | ||
176 | __typeof__(*(ptr)) const __user *__gu_addr = (ptr); \ | ||
177 | __chk_user_ptr(__gu_addr); \ | ||
178 | switch (sizeof(*(__gu_addr))) { \ | ||
179 | case 1: \ | ||
180 | __ret = __get_user_1(__gu_addr); \ | ||
181 | break; \ | ||
182 | case 2: \ | ||
183 | __ret = __get_user_2(__gu_addr); \ | ||
184 | break; \ | ||
185 | case 4: \ | ||
186 | __ret = __get_user_4(__gu_addr); \ | ||
187 | break; \ | ||
188 | case 8: \ | ||
189 | __ret = __get_user_8(__gu_addr); \ | ||
190 | break; \ | ||
191 | default: \ | ||
192 | __ret = __get_user_bad(); \ | ||
193 | break; \ | ||
194 | } \ | ||
195 | (x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \ | ||
196 | __ret.val; \ | ||
197 | __ret.err; \ | ||
198 | }) | ||
199 | |||
200 | /** | ||
201 | * __put_user: - Write a simple value into user space, with less checking. | ||
202 | * @x: Value to copy to user space. | ||
203 | * @ptr: Destination address, in user space. | ||
204 | * | ||
205 | * Context: User context only. This function may sleep. | ||
206 | * | ||
207 | * This macro copies a single simple value from kernel space to user | ||
208 | * space. It supports simple types like char and int, but not larger | ||
209 | * data types like structures or arrays. | ||
210 | * | ||
211 | * @ptr must have pointer-to-simple-variable type, and @x must be assignable | ||
212 | * to the result of dereferencing @ptr. | ||
213 | * | ||
214 | * Caller must check the pointer with access_ok() before calling this | ||
215 | * function. | ||
216 | * | ||
217 | * Returns zero on success, or -EFAULT on error. | ||
218 | * | ||
219 | * Implementation note: The "case 8" logic of casting to the type of | ||
220 | * the result of subtracting the value from itself is basically a way | ||
221 | * of keeping all integer types the same, but casting any pointers to | ||
222 | * ptrdiff_t, i.e. also an integer type. This way there are no | ||
223 | * questionable casts seen by the compiler on an ILP32 platform. | ||
224 | */ | ||
225 | #define __put_user(x, ptr) \ | ||
226 | ({ \ | ||
227 | int __pu_err = 0; \ | ||
228 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | ||
229 | typeof(*__pu_addr) __pu_val = (x); \ | ||
230 | __chk_user_ptr(__pu_addr); \ | ||
231 | switch (sizeof(__pu_val)) { \ | ||
232 | case 1: \ | ||
233 | __pu_err = __put_user_1((long)__pu_val, __pu_addr); \ | ||
234 | break; \ | ||
235 | case 2: \ | ||
236 | __pu_err = __put_user_2((long)__pu_val, __pu_addr); \ | ||
237 | break; \ | ||
238 | case 4: \ | ||
239 | __pu_err = __put_user_4((long)__pu_val, __pu_addr); \ | ||
240 | break; \ | ||
241 | case 8: \ | ||
242 | __pu_err = \ | ||
243 | __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\ | ||
244 | __pu_addr); \ | ||
245 | break; \ | ||
246 | default: \ | ||
247 | __pu_err = __put_user_bad(); \ | ||
248 | break; \ | ||
249 | } \ | ||
250 | __pu_err; \ | ||
251 | }) | ||
252 | |||
253 | /* | ||
254 | * The versions of get_user and put_user without initial underscores | ||
255 | * check the address of their arguments to make sure they are not | ||
256 | * in kernel space. | ||
257 | */ | ||
258 | #define put_user(x, ptr) \ | ||
259 | ({ \ | ||
260 | __typeof__(*(ptr)) __user *__Pu_addr = (ptr); \ | ||
261 | access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ? \ | ||
262 | __put_user((x), (__Pu_addr)) : \ | ||
263 | -EFAULT; \ | ||
264 | }) | ||
265 | |||
266 | #define get_user(x, ptr) \ | ||
267 | ({ \ | ||
268 | __typeof__(*(ptr)) const __user *__Gu_addr = (ptr); \ | ||
269 | access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ? \ | ||
270 | __get_user((x), (__Gu_addr)) : \ | ||
271 | ((x) = 0, -EFAULT); \ | ||
272 | }) | ||
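A minimal usage sketch (illustrative only, not part of this patch; the function name is made up): the checked forms do the access_ok() test themselves, so a caller can simply propagate their return value.

/* Illustrative only: read a counter from user space, bump it, write it back. */
static int example_increment_user_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;
	return put_user(val + 1, uptr);
}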
273 | |||
274 | /** | ||
275 | * __copy_to_user() - copy data into user space, with less checking. | ||
276 | * @to: Destination address, in user space. | ||
277 | * @from: Source address, in kernel space. | ||
278 | * @n: Number of bytes to copy. | ||
279 | * | ||
280 | * Context: User context only. This function may sleep. | ||
281 | * | ||
282 | * Copy data from kernel space to user space. Caller must check | ||
283 | * the specified block with access_ok() before calling this function. | ||
284 | * | ||
285 | * Returns number of bytes that could not be copied. | ||
286 | * On success, this will be zero. | ||
287 | * | ||
288 | * An alternate version - __copy_to_user_inatomic() - is designed | ||
289 | * to be called from atomic context, typically bracketed by calls | ||
290 | * to pagefault_disable() and pagefault_enable(). | ||
291 | */ | ||
292 | extern unsigned long __must_check __copy_to_user_inatomic( | ||
293 | void __user *to, const void *from, unsigned long n); | ||
294 | |||
295 | static inline unsigned long __must_check | ||
296 | __copy_to_user(void __user *to, const void *from, unsigned long n) | ||
297 | { | ||
298 | might_fault(); | ||
299 | return __copy_to_user_inatomic(to, from, n); | ||
300 | } | ||
301 | |||
302 | static inline unsigned long __must_check | ||
303 | copy_to_user(void __user *to, const void *from, unsigned long n) | ||
304 | { | ||
305 | if (access_ok(VERIFY_WRITE, to, n)) | ||
306 | n = __copy_to_user(to, from, n); | ||
307 | return n; | ||
308 | } | ||
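A minimal usage sketch (illustrative only, not part of this patch; the function name is made up): copy_to_user() returns the number of bytes it could not copy, so a nonzero result is normally mapped to -EFAULT.

/* Illustrative only: export a kernel buffer to user space. */
static long example_export_buffer(void __user *ubuf, const void *kbuf,
				  unsigned long len)
{
	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
	return len;
}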
309 | |||
310 | /** | ||
311 | * __copy_from_user() - copy data from user space, with less checking. | ||
312 | * @to: Destination address, in kernel space. | ||
313 | * @from: Source address, in user space. | ||
314 | * @n: Number of bytes to copy. | ||
315 | * | ||
316 | * Context: User context only. This function may sleep. | ||
317 | * | ||
318 | * Copy data from user space to kernel space. Caller must check | ||
319 | * the specified block with access_ok() before calling this function. | ||
320 | * | ||
321 | * Returns number of bytes that could not be copied. | ||
322 | * On success, this will be zero. | ||
323 | * | ||
324 | * If some data could not be copied, this function will pad the copied | ||
325 | * data to the requested size using zero bytes. | ||
326 | * | ||
327 | * An alternate version - __copy_from_user_inatomic() - is designed | ||
328 | * to be called from atomic context, typically bracketed by calls | ||
329 | * to pagefault_disable() and pagefault_enable(). This version | ||
330 | * does *NOT* pad with zeros. | ||
331 | */ | ||
332 | extern unsigned long __must_check __copy_from_user_inatomic( | ||
333 | void *to, const void __user *from, unsigned long n); | ||
334 | extern unsigned long __must_check __copy_from_user_zeroing( | ||
335 | void *to, const void __user *from, unsigned long n); | ||
336 | |||
337 | static inline unsigned long __must_check | ||
338 | __copy_from_user(void *to, const void __user *from, unsigned long n) | ||
339 | { | ||
340 | might_fault(); | ||
341 | return __copy_from_user_zeroing(to, from, n); | ||
342 | } | ||
343 | |||
344 | static inline unsigned long __must_check | ||
345 | _copy_from_user(void *to, const void __user *from, unsigned long n) | ||
346 | { | ||
347 | if (access_ok(VERIFY_READ, from, n)) | ||
348 | n = __copy_from_user(to, from, n); | ||
349 | else | ||
350 | memset(to, 0, n); | ||
351 | return n; | ||
352 | } | ||
353 | |||
354 | #ifdef CONFIG_DEBUG_COPY_FROM_USER | ||
355 | extern void copy_from_user_overflow(void) | ||
356 | __compiletime_warning("copy_from_user() size is not provably correct"); | ||
357 | |||
358 | static inline unsigned long __must_check copy_from_user(void *to, | ||
359 | const void __user *from, | ||
360 | unsigned long n) | ||
361 | { | ||
362 | int sz = __compiletime_object_size(to); | ||
363 | |||
364 | if (likely(sz == -1 || sz >= n)) | ||
365 | n = _copy_from_user(to, from, n); | ||
366 | else | ||
367 | copy_from_user_overflow(); | ||
368 | |||
369 | return n; | ||
370 | } | ||
371 | #else | ||
372 | #define copy_from_user _copy_from_user | ||
373 | #endif | ||
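A minimal usage sketch (illustrative only, not part of this patch; the request structure and function name are made up): on a partial fault copy_from_user() zero-fills the tail of the kernel buffer, so the destination never holds stale kernel data.

/* Illustrative only: pull a small fixed-size request from user space. */
struct example_req {
	int op;
	int arg;
};

static int example_fetch_req(struct example_req *req, const void __user *ubuf)
{
	if (copy_from_user(req, ubuf, sizeof(*req)))
		return -EFAULT;
	return 0;
}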
374 | |||
375 | #ifdef __tilegx__ | ||
376 | /** | ||
377 | * __copy_in_user() - copy data within user space, with less checking. | ||
378 | * @to: Destination address, in user space. | ||
379 | * @from: Source address, in kernel space. | ||
380 | * @n: Number of bytes to copy. | ||
381 | * | ||
382 | * Context: User context only. This function may sleep. | ||
383 | * | ||
384 | * Copy data from user space to user space. Caller must check | ||
385 | * the specified blocks with access_ok() before calling this function. | ||
386 | * | ||
387 | * Returns number of bytes that could not be copied. | ||
388 | * On success, this will be zero. | ||
389 | */ | ||
390 | extern unsigned long __copy_in_user_asm( | ||
391 | void __user *to, const void __user *from, unsigned long n); | ||
392 | |||
393 | static inline unsigned long __must_check | ||
394 | __copy_in_user(void __user *to, const void __user *from, unsigned long n) | ||
395 | { | ||
396 | might_sleep(); | ||
397 | return __copy_in_user_asm(to, from, n); | ||
398 | } | ||
399 | |||
400 | static inline unsigned long __must_check | ||
401 | copy_in_user(void __user *to, const void __user *from, unsigned long n) | ||
402 | { | ||
403 | if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n)) | ||
404 | n = __copy_in_user(to, from, n); | ||
405 | return n; | ||
406 | } | ||
407 | #endif | ||
408 | |||
409 | |||
410 | /** | ||
411 | * strlen_user: - Get the size of a string in user space. | ||
412 | * @str: The string to measure. | ||
413 | * | ||
414 | * Context: User context only. This function may sleep. | ||
415 | * | ||
416 | * Get the size of a NUL-terminated string in user space. | ||
417 | * | ||
418 | * Returns the size of the string INCLUDING the terminating NUL. | ||
419 | * On exception, returns 0. | ||
420 | * | ||
421 | * If there is a limit on the length of a valid string, you may wish to | ||
422 | * consider using strnlen_user() instead. | ||
423 | */ | ||
424 | extern long strnlen_user_asm(const char __user *str, long n); | ||
425 | static inline long __must_check strnlen_user(const char __user *str, long n) | ||
426 | { | ||
427 | might_fault(); | ||
428 | return strnlen_user_asm(str, n); | ||
429 | } | ||
430 | #define strlen_user(str) strnlen_user(str, LONG_MAX) | ||
431 | |||
432 | /** | ||
433 | * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. | ||
434 | * @dst: Destination address, in kernel space. This buffer must be at | ||
435 | * least @count bytes long. | ||
436 | * @src: Source address, in user space. | ||
437 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
438 | * | ||
439 | * Copies a NUL-terminated string from userspace to kernel space. | ||
440 | * Caller must check the specified block with access_ok() before calling | ||
441 | * this function. | ||
442 | * | ||
443 | * On success, returns the length of the string (not including the trailing | ||
444 | * NUL). | ||
445 | * | ||
446 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
447 | * copied). | ||
448 | * | ||
449 | * If @count is smaller than the length of the string, copies @count bytes | ||
450 | * and returns @count. | ||
451 | */ | ||
452 | extern long strncpy_from_user_asm(char *dst, const char __user *src, long); | ||
453 | static inline long __must_check __strncpy_from_user( | ||
454 | char *dst, const char __user *src, long count) | ||
455 | { | ||
456 | might_fault(); | ||
457 | return strncpy_from_user_asm(dst, src, count); | ||
458 | } | ||
459 | static inline long __must_check strncpy_from_user( | ||
460 | char *dst, const char __user *src, long count) | ||
461 | { | ||
462 | if (access_ok(VERIFY_READ, src, 1)) | ||
463 | return __strncpy_from_user(dst, src, count); | ||
464 | return -EFAULT; | ||
465 | } | ||
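A minimal usage sketch (illustrative only, not part of this patch; the function name is made up): fetch a short NUL-terminated name, treating a result equal to the buffer size as truncation, per the return convention documented above.

/* Illustrative only: copy a user-supplied name into a fixed kernel buffer. */
static int example_get_name(char *name, const char __user *uname, long size)
{
	long len = strncpy_from_user(name, uname, size);

	if (len < 0)
		return len;		/* -EFAULT */
	if (len == size)
		return -ENAMETOOLONG;	/* no room left for the trailing NUL */
	return 0;
}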
466 | |||
467 | /** | ||
468 | * clear_user: - Zero a block of memory in user space. | ||
469 | * @mem: Destination address, in user space. | ||
470 | * @len: Number of bytes to zero. | ||
471 | * | ||
472 | * Zero a block of memory in user space. | ||
473 | * | ||
474 | * Returns number of bytes that could not be cleared. | ||
475 | * On success, this will be zero. | ||
476 | */ | ||
477 | extern unsigned long clear_user_asm(void __user *mem, unsigned long len); | ||
478 | static inline unsigned long __must_check __clear_user( | ||
479 | void __user *mem, unsigned long len) | ||
480 | { | ||
481 | might_fault(); | ||
482 | return clear_user_asm(mem, len); | ||
483 | } | ||
484 | static inline unsigned long __must_check clear_user( | ||
485 | void __user *mem, unsigned long len) | ||
486 | { | ||
487 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
488 | return __clear_user(mem, len); | ||
489 | return len; | ||
490 | } | ||
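A minimal usage sketch (illustrative only, not part of this patch; the function name is made up): zero the unused tail of a user buffer, e.g. when returning less data than was requested.

/* Illustrative only: pad the remainder of a user buffer with zeroes. */
static long example_pad_tail(char __user *buf, unsigned long used,
			     unsigned long total)
{
	if (clear_user(buf + used, total - used))
		return -EFAULT;
	return used;
}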
491 | |||
492 | /** | ||
493 | * flush_user: - Flush a block of memory in user space from cache. | ||
494 | * @mem: Destination address, in user space. | ||
495 | * @len: Number of bytes to flush. | ||
496 | * | ||
497 | * Returns number of bytes that could not be flushed. | ||
498 | * On success, this will be zero. | ||
499 | */ | ||
500 | extern unsigned long flush_user_asm(void __user *mem, unsigned long len); | ||
501 | static inline unsigned long __must_check __flush_user( | ||
502 | void __user *mem, unsigned long len) | ||
503 | { | ||
504 | int retval; | ||
505 | |||
506 | might_fault(); | ||
507 | retval = flush_user_asm(mem, len); | ||
508 | mb_incoherent(); | ||
509 | return retval; | ||
510 | } | ||
511 | |||
512 | static inline unsigned long __must_check flush_user( | ||
513 | void __user *mem, unsigned long len) | ||
514 | { | ||
515 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
516 | return __flush_user(mem, len); | ||
517 | return len; | ||
518 | } | ||
519 | |||
520 | /** | ||
521 | * inv_user: - Invalidate a block of memory in user space from cache. | ||
522 | * @mem: Destination address, in user space. | ||
523 | * @len: Number of bytes to invalidate. | ||
524 | * | ||
525 | * Returns number of bytes that could not be invalidated. | ||
526 | * On success, this will be zero. | ||
527 | * | ||
528 | * Note that on Tile64, the "inv" operation is in fact a | ||
529 | * "flush and invalidate", so cache write-backs will occur prior | ||
530 | * to the cache being marked invalid. | ||
531 | */ | ||
532 | extern unsigned long inv_user_asm(void __user *mem, unsigned long len); | ||
533 | static inline unsigned long __must_check __inv_user( | ||
534 | void __user *mem, unsigned long len) | ||
535 | { | ||
536 | int retval; | ||
537 | |||
538 | might_fault(); | ||
539 | retval = inv_user_asm(mem, len); | ||
540 | mb_incoherent(); | ||
541 | return retval; | ||
542 | } | ||
543 | static inline unsigned long __must_check inv_user( | ||
544 | void __user *mem, unsigned long len) | ||
545 | { | ||
546 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
547 | return __inv_user(mem, len); | ||
548 | return len; | ||
549 | } | ||
550 | |||
551 | /** | ||
552 | * finv_user: - Flush-inval a block of memory in user space from cache. | ||
553 | * @mem: Destination address, in user space. | ||
554 | * @len: Number of bytes to invalidate. | ||
555 | * | ||
556 | * Returns number of bytes that could not be flush-invalidated. | ||
557 | * On success, this will be zero. | ||
558 | */ | ||
559 | extern unsigned long finv_user_asm(void __user *mem, unsigned long len); | ||
560 | static inline unsigned long __must_check __finv_user( | ||
561 | void __user *mem, unsigned long len) | ||
562 | { | ||
563 | int retval; | ||
564 | |||
565 | might_fault(); | ||
566 | retval = finv_user_asm(mem, len); | ||
567 | mb_incoherent(); | ||
568 | return retval; | ||
569 | } | ||
570 | static inline unsigned long __must_check finv_user( | ||
571 | void __user *mem, unsigned long len) | ||
572 | { | ||
573 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
574 | return __finv_user(mem, len); | ||
575 | return len; | ||
576 | } | ||
577 | |||
578 | #endif /* _ASM_TILE_UACCESS_H */ | ||
diff --git a/arch/tile/include/asm/ucontext.h b/arch/tile/include/asm/ucontext.h new file mode 100644 index 000000000000..9bc07b9f30fb --- /dev/null +++ b/arch/tile/include/asm/ucontext.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ucontext.h> | |||
diff --git a/arch/tile/include/asm/unaligned.h b/arch/tile/include/asm/unaligned.h new file mode 100644 index 000000000000..137e2de5b102 --- /dev/null +++ b/arch/tile/include/asm/unaligned.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_UNALIGNED_H | ||
16 | #define _ASM_TILE_UNALIGNED_H | ||
17 | |||
18 | #include <linux/unaligned/le_struct.h> | ||
19 | #include <linux/unaligned/be_byteshift.h> | ||
20 | #include <linux/unaligned/generic.h> | ||
21 | #define get_unaligned __get_unaligned_le | ||
22 | #define put_unaligned __put_unaligned_le | ||
23 | |||
24 | #endif /* _ASM_TILE_UNALIGNED_H */ | ||
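A minimal usage sketch (illustrative only, not part of this patch; the function name is made up): with the little-endian helpers selected above, multi-byte fields can be read from packed or misaligned buffers without alignment traps.

/* Illustrative only: read a little-endian 32-bit field at an arbitrary offset. */
static u32 example_read_le32(const void *buf, unsigned int offset)
{
	return get_unaligned((const u32 *)(buf + offset));
}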
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h new file mode 100644 index 000000000000..03b3d5d665dd --- /dev/null +++ b/arch/tile/include/asm/unistd.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL) | ||
16 | #define _ASM_TILE_UNISTD_H | ||
17 | |||
18 | |||
19 | #ifndef __LP64__ | ||
20 | /* Use the flavor of this syscall that matches the 32-bit API better. */ | ||
21 | #define __ARCH_WANT_SYNC_FILE_RANGE2 | ||
22 | #endif | ||
23 | |||
24 | /* Use the standard ABI for syscalls. */ | ||
25 | #include <asm-generic/unistd.h> | ||
26 | |||
27 | #ifndef __tilegx__ | ||
28 | /* "Fast" syscalls provide atomic support for 32-bit chips. */ | ||
29 | #define __NR_FAST_cmpxchg -1 | ||
30 | #define __NR_FAST_atomic_update -2 | ||
31 | #define __NR_FAST_cmpxchg64 -3 | ||
32 | #define __NR_cmpxchg_badaddr (__NR_arch_specific_syscall + 0) | ||
33 | __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr) | ||
34 | #endif | ||
35 | |||
36 | /* Additional Tilera-specific syscalls. */ | ||
37 | #define __NR_flush_cache (__NR_arch_specific_syscall + 1) | ||
38 | __SYSCALL(__NR_flush_cache, sys_flush_cache) | ||
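A minimal user-space sketch (illustrative only, not part of this patch; assumes the installed uapi headers expose __NR_flush_cache): the Tilera-specific cache-flush syscall would be reached through the generic syscall(2) entry point using the number defined above.

/* Illustrative only (user space): request a cache flush from the kernel. */
#include <unistd.h>
#include <sys/syscall.h>

static int example_flush_cache(void)
{
	return syscall(__NR_flush_cache);
}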
39 | |||
40 | #ifdef __KERNEL__ | ||
41 | /* In compat mode, we use sys_llseek() for compat_sys_llseek(). */ | ||
42 | #ifdef CONFIG_COMPAT | ||
43 | #define __ARCH_WANT_SYS_LLSEEK | ||
44 | #endif | ||
45 | #endif | ||
46 | |||
47 | #endif /* _ASM_TILE_UNISTD_H */ | ||
diff --git a/arch/tile/include/asm/user.h b/arch/tile/include/asm/user.h new file mode 100644 index 000000000000..cbc8b4d5a5ce --- /dev/null +++ b/arch/tile/include/asm/user.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_USER_H | ||
17 | #define _ASM_TILE_USER_H | ||
18 | |||
19 | /* This header is for a.out file formats, which TILE does not support. */ | ||
20 | |||
21 | #endif /* _ASM_TILE_USER_H */ | ||
diff --git a/arch/tile/include/asm/xor.h b/arch/tile/include/asm/xor.h new file mode 100644 index 000000000000..c82eb12a5b18 --- /dev/null +++ b/arch/tile/include/asm/xor.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/xor.h> | |||
diff --git a/arch/tile/include/hv/drv_pcie_rc_intf.h b/arch/tile/include/hv/drv_pcie_rc_intf.h new file mode 100644 index 000000000000..9bd2243bece0 --- /dev/null +++ b/arch/tile/include/hv/drv_pcie_rc_intf.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file drv_pcie_rc_intf.h | ||
17 | * Interface definitions for the PCIE Root Complex. | ||
18 | */ | ||
19 | |||
20 | #ifndef _SYS_HV_DRV_PCIE_RC_INTF_H | ||
21 | #define _SYS_HV_DRV_PCIE_RC_INTF_H | ||
22 | |||
23 | /** File offset for reading the interrupt base number used for PCIE legacy | ||
24 | interrupts and the PLX Gen 1 requirement flag. */ | ||
25 | #define PCIE_RC_CONFIG_MASK_OFF 0 | ||
26 | |||
27 | |||
28 | /** | ||
29 | * Structure used for obtaining PCIe config information, read from the PCIE | ||
30 | * subsystem /ctl file at initialization | ||
31 | */ | ||
32 | typedef struct pcie_rc_config | ||
33 | { | ||
34 | int intr; /**< interrupt number used for downcall */ | ||
35 | int plx_gen1; /**< flag for PLX Gen 1 configuration */ | ||
36 | } pcie_rc_config_t; | ||
37 | |||
38 | #endif /* _SYS_HV_DRV_PCIE_RC_INTF_H */ | ||
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h new file mode 100644 index 000000000000..84b31551080a --- /dev/null +++ b/arch/tile/include/hv/hypervisor.h | |||
@@ -0,0 +1,2366 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file hypervisor.h | ||
17 | * The hypervisor's public API. | ||
18 | */ | ||
19 | |||
20 | #ifndef _TILE_HV_H | ||
21 | #define _TILE_HV_H | ||
22 | |||
23 | #ifdef __tile__ | ||
24 | #include <arch/chip.h> | ||
25 | #else | ||
26 | /* HACK: Allow use by "tools/cpack/". */ | ||
27 | #include "install/include/arch/chip.h" | ||
28 | #endif | ||
29 | |||
30 | /* Linux builds want unsigned long constants, but assembler wants numbers */ | ||
31 | #ifdef __ASSEMBLER__ | ||
32 | /** One, for assembler */ | ||
33 | #define __HV_SIZE_ONE 1 | ||
34 | #elif !defined(__tile__) && CHIP_VA_WIDTH() > 32 | ||
35 | /** One, for 64-bit on host */ | ||
36 | #define __HV_SIZE_ONE 1ULL | ||
37 | #else | ||
38 | /** One, for Linux */ | ||
39 | #define __HV_SIZE_ONE 1UL | ||
40 | #endif | ||
41 | |||
42 | |||
43 | /** The log2 of the span of a level-1 page table, in bytes. | ||
44 | */ | ||
45 | #define HV_LOG2_L1_SPAN 32 | ||
46 | |||
47 | /** The span of a level-1 page table, in bytes. | ||
48 | */ | ||
49 | #define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN) | ||
50 | |||
51 | /** The log2 of the size of small pages, in bytes. This value should | ||
52 | * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). | ||
53 | */ | ||
54 | #define HV_LOG2_PAGE_SIZE_SMALL 16 | ||
55 | |||
56 | /** The size of small pages, in bytes. This value should be verified | ||
57 | * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). | ||
58 | */ | ||
59 | #define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL) | ||
60 | |||
61 | /** The log2 of the size of large pages, in bytes. This value should be | ||
62 | * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). | ||
63 | */ | ||
64 | #define HV_LOG2_PAGE_SIZE_LARGE 24 | ||
65 | |||
66 | /** The size of large pages, in bytes. This value should be verified | ||
67 | * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). | ||
68 | */ | ||
69 | #define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE) | ||
70 | |||
71 | /** The log2 of the granularity at which page tables must be aligned; | ||
72 | * in other words, the CPA for a page table must have this many zero | ||
73 | * bits at the bottom of the address. | ||
74 | */ | ||
75 | #define HV_LOG2_PAGE_TABLE_ALIGN 11 | ||
76 | |||
77 | /** The granularity at which page tables must be aligned. | ||
78 | */ | ||
79 | #define HV_PAGE_TABLE_ALIGN (__HV_SIZE_ONE << HV_LOG2_PAGE_TABLE_ALIGN) | ||
80 | |||
81 | /** Normal start of hypervisor glue in client physical memory. */ | ||
82 | #define HV_GLUE_START_CPA 0x10000 | ||
83 | |||
84 | /** This much space is reserved at HV_GLUE_START_CPA | ||
85 | * for the hypervisor glue. The client program must start at | ||
86 | * some address higher than this, and in particular the address of | ||
87 | * its text section should be equal to zero modulo HV_PAGE_SIZE_LARGE | ||
88 | * so that relative offsets to the HV glue are correct. | ||
89 | */ | ||
90 | #define HV_GLUE_RESERVED_SIZE 0x10000 | ||
91 | |||
92 | /** Each entry in the hv dispatch array takes this many bytes. */ | ||
93 | #define HV_DISPATCH_ENTRY_SIZE 32 | ||
94 | |||
95 | /** Version of the hypervisor interface defined by this file */ | ||
96 | #define _HV_VERSION 10 | ||
97 | |||
98 | /* Index into hypervisor interface dispatch code blocks. | ||
99 | * | ||
100 | * Hypervisor calls are invoked from user space by calling code | ||
101 | * at an address HV_BASE_ADDRESS + (index) * HV_DISPATCH_ENTRY_SIZE, | ||
102 | * where index is one of these enum values. | ||
103 | * | ||
104 | * Normally a supervisor is expected to produce a set of symbols | ||
105 | * starting at HV_BASE_ADDRESS that obey this convention, but a user | ||
106 | * program could call directly through function pointers if desired. | ||
107 | * | ||
108 | * These numbers are part of the binary API and will not be changed | ||
109 | * without updating HV_VERSION, which should be a rare event. | ||
110 | */ | ||
111 | |||
112 | /** reserved. */ | ||
113 | #define _HV_DISPATCH_RESERVED 0 | ||
114 | |||
115 | /** hv_init */ | ||
116 | #define HV_DISPATCH_INIT 1 | ||
117 | |||
118 | /** hv_install_context */ | ||
119 | #define HV_DISPATCH_INSTALL_CONTEXT 2 | ||
120 | |||
121 | /** hv_sysconf */ | ||
122 | #define HV_DISPATCH_SYSCONF 3 | ||
123 | |||
124 | /** hv_get_rtc */ | ||
125 | #define HV_DISPATCH_GET_RTC 4 | ||
126 | |||
127 | /** hv_set_rtc */ | ||
128 | #define HV_DISPATCH_SET_RTC 5 | ||
129 | |||
130 | /** hv_flush_asid */ | ||
131 | #define HV_DISPATCH_FLUSH_ASID 6 | ||
132 | |||
133 | /** hv_flush_page */ | ||
134 | #define HV_DISPATCH_FLUSH_PAGE 7 | ||
135 | |||
136 | /** hv_flush_pages */ | ||
137 | #define HV_DISPATCH_FLUSH_PAGES 8 | ||
138 | |||
139 | /** hv_restart */ | ||
140 | #define HV_DISPATCH_RESTART 9 | ||
141 | |||
142 | /** hv_halt */ | ||
143 | #define HV_DISPATCH_HALT 10 | ||
144 | |||
145 | /** hv_power_off */ | ||
146 | #define HV_DISPATCH_POWER_OFF 11 | ||
147 | |||
148 | /** hv_inquire_physical */ | ||
149 | #define HV_DISPATCH_INQUIRE_PHYSICAL 12 | ||
150 | |||
151 | /** hv_inquire_memory_controller */ | ||
152 | #define HV_DISPATCH_INQUIRE_MEMORY_CONTROLLER 13 | ||
153 | |||
154 | /** hv_inquire_virtual */ | ||
155 | #define HV_DISPATCH_INQUIRE_VIRTUAL 14 | ||
156 | |||
157 | /** hv_inquire_asid */ | ||
158 | #define HV_DISPATCH_INQUIRE_ASID 15 | ||
159 | |||
160 | /** hv_nanosleep */ | ||
161 | #define HV_DISPATCH_NANOSLEEP 16 | ||
162 | |||
163 | /** hv_console_read_if_ready */ | ||
164 | #define HV_DISPATCH_CONSOLE_READ_IF_READY 17 | ||
165 | |||
166 | /** hv_console_write */ | ||
167 | #define HV_DISPATCH_CONSOLE_WRITE 18 | ||
168 | |||
169 | /** hv_downcall_dispatch */ | ||
170 | #define HV_DISPATCH_DOWNCALL_DISPATCH 19 | ||
171 | |||
172 | /** hv_inquire_topology */ | ||
173 | #define HV_DISPATCH_INQUIRE_TOPOLOGY 20 | ||
174 | |||
175 | /** hv_fs_findfile */ | ||
176 | #define HV_DISPATCH_FS_FINDFILE 21 | ||
177 | |||
178 | /** hv_fs_fstat */ | ||
179 | #define HV_DISPATCH_FS_FSTAT 22 | ||
180 | |||
181 | /** hv_fs_pread */ | ||
182 | #define HV_DISPATCH_FS_PREAD 23 | ||
183 | |||
184 | /** hv_physaddr_read64 */ | ||
185 | #define HV_DISPATCH_PHYSADDR_READ64 24 | ||
186 | |||
187 | /** hv_physaddr_write64 */ | ||
188 | #define HV_DISPATCH_PHYSADDR_WRITE64 25 | ||
189 | |||
190 | /** hv_get_command_line */ | ||
191 | #define HV_DISPATCH_GET_COMMAND_LINE 26 | ||
192 | |||
193 | /** hv_set_caching */ | ||
194 | #define HV_DISPATCH_SET_CACHING 27 | ||
195 | |||
196 | /** hv_bzero_page */ | ||
197 | #define HV_DISPATCH_BZERO_PAGE 28 | ||
198 | |||
199 | /** hv_register_message_state */ | ||
200 | #define HV_DISPATCH_REGISTER_MESSAGE_STATE 29 | ||
201 | |||
202 | /** hv_send_message */ | ||
203 | #define HV_DISPATCH_SEND_MESSAGE 30 | ||
204 | |||
205 | /** hv_receive_message */ | ||
206 | #define HV_DISPATCH_RECEIVE_MESSAGE 31 | ||
207 | |||
208 | /** hv_inquire_context */ | ||
209 | #define HV_DISPATCH_INQUIRE_CONTEXT 32 | ||
210 | |||
211 | /** hv_start_all_tiles */ | ||
212 | #define HV_DISPATCH_START_ALL_TILES 33 | ||
213 | |||
214 | /** hv_dev_open */ | ||
215 | #define HV_DISPATCH_DEV_OPEN 34 | ||
216 | |||
217 | /** hv_dev_close */ | ||
218 | #define HV_DISPATCH_DEV_CLOSE 35 | ||
219 | |||
220 | /** hv_dev_pread */ | ||
221 | #define HV_DISPATCH_DEV_PREAD 36 | ||
222 | |||
223 | /** hv_dev_pwrite */ | ||
224 | #define HV_DISPATCH_DEV_PWRITE 37 | ||
225 | |||
226 | /** hv_dev_poll */ | ||
227 | #define HV_DISPATCH_DEV_POLL 38 | ||
228 | |||
229 | /** hv_dev_poll_cancel */ | ||
230 | #define HV_DISPATCH_DEV_POLL_CANCEL 39 | ||
231 | |||
232 | /** hv_dev_preada */ | ||
233 | #define HV_DISPATCH_DEV_PREADA 40 | ||
234 | |||
235 | /** hv_dev_pwritea */ | ||
236 | #define HV_DISPATCH_DEV_PWRITEA 41 | ||
237 | |||
238 | /** hv_flush_remote */ | ||
239 | #define HV_DISPATCH_FLUSH_REMOTE 42 | ||
240 | |||
241 | /** hv_console_putc */ | ||
242 | #define HV_DISPATCH_CONSOLE_PUTC 43 | ||
243 | |||
244 | /** hv_inquire_tiles */ | ||
245 | #define HV_DISPATCH_INQUIRE_TILES 44 | ||
246 | |||
247 | /** hv_confstr */ | ||
248 | #define HV_DISPATCH_CONFSTR 45 | ||
249 | |||
250 | /** hv_reexec */ | ||
251 | #define HV_DISPATCH_REEXEC 46 | ||
252 | |||
253 | /** hv_set_command_line */ | ||
254 | #define HV_DISPATCH_SET_COMMAND_LINE 47 | ||
255 | |||
256 | /** hv_dev_register_intr_state */ | ||
257 | #define HV_DISPATCH_DEV_REGISTER_INTR_STATE 48 | ||
258 | |||
259 | /** hv_enable_intr */ | ||
260 | #define HV_DISPATCH_ENABLE_INTR 49 | ||
261 | |||
262 | /** hv_disable_intr */ | ||
263 | #define HV_DISPATCH_DISABLE_INTR 50 | ||
264 | |||
265 | /** hv_trigger_ipi */ | ||
266 | #define HV_DISPATCH_TRIGGER_IPI 51 | ||
267 | |||
268 | /** hv_store_mapping */ | ||
269 | #define HV_DISPATCH_STORE_MAPPING 52 | ||
270 | |||
271 | /** hv_inquire_realpa */ | ||
272 | #define HV_DISPATCH_INQUIRE_REALPA 53 | ||
273 | |||
274 | /** hv_flush_all */ | ||
275 | #define HV_DISPATCH_FLUSH_ALL 54 | ||
276 | |||
277 | /** One more than the largest dispatch value */ | ||
278 | #define _HV_DISPATCH_END 55 | ||
279 | |||
280 | |||
281 | #ifndef __ASSEMBLER__ | ||
282 | |||
283 | #ifdef __KERNEL__ | ||
284 | #include <asm/types.h> | ||
285 | typedef u32 __hv32; /**< 32-bit value */ | ||
286 | typedef u64 __hv64; /**< 64-bit value */ | ||
287 | #else | ||
288 | #include <stdint.h> | ||
289 | typedef uint32_t __hv32; /**< 32-bit value */ | ||
290 | typedef uint64_t __hv64; /**< 64-bit value */ | ||
291 | #endif | ||
292 | |||
293 | |||
294 | /** Hypervisor physical address. */ | ||
295 | typedef __hv64 HV_PhysAddr; | ||
296 | |||
297 | #if CHIP_VA_WIDTH() > 32 | ||
298 | /** Hypervisor virtual address. */ | ||
299 | typedef __hv64 HV_VirtAddr; | ||
300 | #else | ||
301 | /** Hypervisor virtual address. */ | ||
302 | typedef __hv32 HV_VirtAddr; | ||
303 | #endif /* CHIP_VA_WIDTH() > 32 */ | ||
304 | |||
305 | /** Hypervisor ASID. */ | ||
306 | typedef unsigned int HV_ASID; | ||
307 | |||
308 | /** Hypervisor tile location for a memory access | ||
309 | * ("location overridden target"). | ||
310 | */ | ||
311 | typedef unsigned int HV_LOTAR; | ||
312 | |||
313 | /** Hypervisor size of a page. */ | ||
314 | typedef unsigned long HV_PageSize; | ||
315 | |||
316 | /** A page table entry. | ||
317 | */ | ||
318 | typedef struct | ||
319 | { | ||
320 | __hv64 val; /**< Value of PTE */ | ||
321 | } HV_PTE; | ||
322 | |||
323 | /** Hypervisor error code. */ | ||
324 | typedef int HV_Errno; | ||
325 | |||
326 | #endif /* !__ASSEMBLER__ */ | ||
327 | |||
328 | #define HV_OK 0 /**< No error */ | ||
329 | #define HV_EINVAL -801 /**< Invalid argument */ | ||
330 | #define HV_ENODEV -802 /**< No such device */ | ||
331 | #define HV_ENOENT -803 /**< No such file or directory */ | ||
332 | #define HV_EBADF -804 /**< Bad file number */ | ||
333 | #define HV_EFAULT -805 /**< Bad address */ | ||
334 | #define HV_ERECIP -806 /**< Bad recipients */ | ||
335 | #define HV_E2BIG -807 /**< Message too big */ | ||
336 | #define HV_ENOTSUP -808 /**< Service not supported */ | ||
337 | #define HV_EBUSY -809 /**< Device busy */ | ||
338 | #define HV_ENOSYS -810 /**< Invalid syscall */ | ||
339 | #define HV_EPERM -811 /**< No permission */ | ||
340 | #define HV_ENOTREADY -812 /**< Device not ready */ | ||
341 | #define HV_EIO -813 /**< I/O error */ | ||
342 | #define HV_ENOMEM -814 /**< Out of memory */ | ||
343 | |||
344 | #define HV_ERR_MAX -801 /**< Largest HV error code */ | ||
345 | #define HV_ERR_MIN -814 /**< Smallest HV error code */ | ||
346 | |||
347 | #ifndef __ASSEMBLER__ | ||
348 | |||
349 | /** Pass HV_VERSION to hv_init to request this version of the interface. */ | ||
350 | typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber; | ||
351 | |||
352 | /** Initializes the hypervisor. | ||
353 | * | ||
354 | * @param interface_version_number The version of the hypervisor interface | ||
355 | * that this program expects, typically HV_VERSION. | ||
356 | * @param chip_num Architecture number of the chip the client was built for. | ||
357 | * @param chip_rev_num Revision number of the chip the client was built for. | ||
358 | */ | ||
359 | void hv_init(HV_VersionNumber interface_version_number, | ||
360 | int chip_num, int chip_rev_num); | ||
361 | |||
362 | |||
363 | /** Queries we can make for hv_sysconf(). | ||
364 | * | ||
365 | * These numbers are part of the binary API and guaranteed not to change. | ||
366 | */ | ||
367 | typedef enum { | ||
368 | /** An invalid value; do not use. */ | ||
369 | _HV_SYSCONF_RESERVED = 0, | ||
370 | |||
371 | /** The length of the glue section containing the hv_ procs, in bytes. */ | ||
372 | HV_SYSCONF_GLUE_SIZE = 1, | ||
373 | |||
374 | /** The size of small pages, in bytes. */ | ||
375 | HV_SYSCONF_PAGE_SIZE_SMALL = 2, | ||
376 | |||
377 | /** The size of large pages, in bytes. */ | ||
378 | HV_SYSCONF_PAGE_SIZE_LARGE = 3, | ||
379 | |||
380 | /** Processor clock speed, in hertz. */ | ||
381 | HV_SYSCONF_CPU_SPEED = 4, | ||
382 | |||
383 | /** Processor temperature, in degrees Kelvin. The value | ||
384 | * HV_SYSCONF_TEMP_KTOC may be subtracted from this to get degrees | ||
385 | * Celsius. If that Celsius value is HV_SYSCONF_OVERTEMP, this indicates | ||
386 | * that the temperature has hit an upper limit and is no longer being | ||
387 | * accurately tracked. | ||
388 | */ | ||
389 | HV_SYSCONF_CPU_TEMP = 5, | ||
390 | |||
391 | /** Board temperature, in degrees Kelvin. The value | ||
392 | * HV_SYSCONF_TEMP_KTOC may be subtracted from this to get degrees | ||
393 | * Celsius. If that Celsius value is HV_SYSCONF_OVERTEMP, this indicates | ||
394 | * that the temperature has hit an upper limit and is no longer being | ||
395 | * accurately tracked. | ||
396 | */ | ||
397 | HV_SYSCONF_BOARD_TEMP = 6 | ||
398 | |||
399 | } HV_SysconfQuery; | ||
400 | |||
401 | /** Offset to subtract from returned Kelvin temperature to get degrees | ||
402 | Celsius. */ | ||
403 | #define HV_SYSCONF_TEMP_KTOC 273 | ||
404 | |||
405 | /** Pseudo-temperature value indicating that the temperature has | ||
406 | * pegged at its upper limit and is no longer accurate; note that this is | ||
407 | * the value after subtracting HV_SYSCONF_TEMP_KTOC. */ | ||
408 | #define HV_SYSCONF_OVERTEMP 999 | ||
409 | |||
410 | /** Query a configuration value from the hypervisor. | ||
411 | * @param query Which value is requested (HV_SYSCONF_xxx). | ||
412 | * @return The requested value, or -1 if the requested value is illegal or | ||
413 | * unavailable. | ||
414 | */ | ||
415 | long hv_sysconf(HV_SysconfQuery query); | ||
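A minimal usage sketch (illustrative only, not part of this patch; the function name is made up): since the page-size macros above are compile-time values, early boot code is expected to cross-check them against hv_sysconf().

/* Illustrative only: verify the hypervisor's small page size at boot. */
static void example_check_page_size(void)
{
	long sz = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);

	if (sz > 0 && sz != HV_PAGE_SIZE_SMALL)
		panic("HV small page size %ld != expected %ld",
		      sz, (long)HV_PAGE_SIZE_SMALL);
}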
416 | |||
417 | |||
418 | /** Queries we can make for hv_confstr(). | ||
419 | * | ||
420 | * These numbers are part of the binary API and guaranteed not to change. | ||
421 | */ | ||
422 | typedef enum { | ||
423 | /** An invalid value; do not use. */ | ||
424 | _HV_CONFSTR_RESERVED = 0, | ||
425 | |||
426 | /** Board part number. */ | ||
427 | HV_CONFSTR_BOARD_PART_NUM = 1, | ||
428 | |||
429 | /** Board serial number. */ | ||
430 | HV_CONFSTR_BOARD_SERIAL_NUM = 2, | ||
431 | |||
432 | /** Chip serial number. */ | ||
433 | HV_CONFSTR_CHIP_SERIAL_NUM = 3, | ||
434 | |||
435 | /** Board revision level. */ | ||
436 | HV_CONFSTR_BOARD_REV = 4, | ||
437 | |||
438 | /** Hypervisor software version. */ | ||
439 | HV_CONFSTR_HV_SW_VER = 5, | ||
440 | |||
441 | /** The name for this chip model. */ | ||
442 | HV_CONFSTR_CHIP_MODEL = 6, | ||
443 | |||
444 | /** Human-readable board description. */ | ||
445 | HV_CONFSTR_BOARD_DESC = 7, | ||
446 | |||
447 | /** Human-readable description of the hypervisor configuration. */ | ||
448 | HV_CONFSTR_HV_CONFIG = 8, | ||
449 | |||
450 | /** Human-readable version string for the boot image (for instance, | ||
451 | * who built it and when, what configuration file was used). */ | ||
452 | HV_CONFSTR_HV_CONFIG_VER = 9, | ||
453 | |||
454 | /** Mezzanine part number. */ | ||
455 | HV_CONFSTR_MEZZ_PART_NUM = 10, | ||
456 | |||
457 | /** Mezzanine serial number. */ | ||
458 | HV_CONFSTR_MEZZ_SERIAL_NUM = 11, | ||
459 | |||
460 | /** Mezzanine revision level. */ | ||
461 | HV_CONFSTR_MEZZ_REV = 12, | ||
462 | |||
463 | /** Human-readable mezzanine description. */ | ||
464 | HV_CONFSTR_MEZZ_DESC = 13, | ||
465 | |||
466 | /** Control path for the onboard network switch. */ | ||
467 | HV_CONFSTR_SWITCH_CONTROL = 14, | ||
468 | |||
469 | /** Chip revision level. */ | ||
470 | HV_CONFSTR_CHIP_REV = 15 | ||
471 | |||
472 | } HV_ConfstrQuery; | ||
473 | |||
474 | /** Query a configuration string from the hypervisor. | ||
475 | * | ||
476 | * @param query Identifier for the specific string to be retrieved | ||
477 | * (HV_CONFSTR_xxx). | ||
478 | * @param buf Buffer in which to place the string. | ||
479 | * @param len Length of the buffer. | ||
480 | * @return If query is valid, then the length of the corresponding string, | ||
481 | * including the trailing null; if this is greater than len, the string | ||
482 | * was truncated. If query is invalid, HV_EINVAL. If the specified | ||
483 | * buffer is not writable by the client, HV_EFAULT. | ||
484 | */ | ||
485 | int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len); | ||
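A minimal usage sketch (illustrative only, not part of this patch; the function name is made up): query the chip model string; a return value larger than the buffer length means the string was truncated.

/* Illustrative only: fetch and print the chip model name. */
static void example_show_chip_model(void)
{
	char buf[64];
	int len = hv_confstr(HV_CONFSTR_CHIP_MODEL,
			     (HV_VirtAddr)(unsigned long)buf, sizeof(buf));

	if (len > 0 && len <= sizeof(buf))
		pr_info("chip model: %s\n", buf);
}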
486 | |||
487 | /** State object used to enable and disable one-shot and level-sensitive | ||
488 | * interrupts. */ | ||
489 | typedef struct | ||
490 | { | ||
491 | #if CHIP_VA_WIDTH() > 32 | ||
492 | __hv64 opaque[2]; /**< No user-serviceable parts inside */ | ||
493 | #else | ||
494 | __hv32 opaque[2]; /**< No user-serviceable parts inside */ | ||
495 | #endif | ||
496 | } | ||
497 | HV_IntrState; | ||
498 | |||
499 | /** A set of interrupts. */ | ||
500 | typedef __hv32 HV_IntrMask; | ||
501 | |||
502 | /** Tile coordinate */ | ||
503 | typedef struct | ||
504 | { | ||
505 | /** X coordinate, relative to supervisor's top-left coordinate */ | ||
506 | int x; | ||
507 | |||
508 | /** Y coordinate, relative to supervisor's top-left coordinate */ | ||
509 | int y; | ||
510 | } HV_Coord; | ||
511 | |||
512 | /** The low interrupt numbers are reserved for use by the client in | ||
513 | * delivering IPIs. Any interrupt numbers higher than this value are | ||
514 | * reserved for use by HV device drivers. */ | ||
515 | #define HV_MAX_IPI_INTERRUPT 7 | ||
516 | |||
517 | /** Register an interrupt state object. This object is used to enable and | ||
518 | * disable one-shot and level-sensitive interrupts. Once the state is | ||
519 | * registered, the client must not read or write the state object; doing | ||
520 | * so will cause undefined results. | ||
521 | * | ||
522 | * @param intr_state Pointer to interrupt state object. | ||
523 | * @return HV_OK on success, or a hypervisor error code. | ||
524 | */ | ||
525 | HV_Errno hv_dev_register_intr_state(HV_IntrState* intr_state); | ||
526 | |||
527 | /** Enable a set of one-shot and level-sensitive interrupts. | ||
528 | * | ||
529 | * @param intr_state Pointer to interrupt state object. | ||
530 | * @param enab_mask Bitmap of interrupts to enable. | ||
531 | */ | ||
532 | void hv_enable_intr(HV_IntrState* intr_state, HV_IntrMask enab_mask); | ||
533 | |||
534 | /** Disable a set of one-shot and level-sensitive interrupts. | ||
535 | * | ||
536 | * @param intr_state Pointer to interrupt state object. | ||
537 | * @param disab_mask Bitmap of interrupts to disable. | ||
538 | */ | ||
539 | void hv_disable_intr(HV_IntrState* intr_state, HV_IntrMask disab_mask); | ||
540 | |||
541 | /** Trigger a one-shot interrupt on some tile. | ||
542 | * | ||
543 | * @param tile Which tile to interrupt. | ||
544 | * @param interrupt Interrupt number to trigger; must be between 0 and | ||
545 | * HV_MAX_IPI_INTERRUPT. | ||
546 | * @return HV_OK on success, or a hypervisor error code. | ||
547 | */ | ||
548 | HV_Errno hv_trigger_ipi(HV_Coord tile, int interrupt); | ||
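Sketch of the IPI flow implied above, split between receiver and sender. Whether client IPIs must be enabled through the HV_IntrState mechanism is an assumption here; interrupt number 0 and the coordinate (1, 0) are arbitrary illustrative choices.

#include <hv/hypervisor.h>      /* assumed include path */

static HV_IntrState ipi_state;  /* opaque; untouched after registration */

/* On the receiving tile: register interrupt state and enable IPI 0
 * (assuming client IPIs use the same one-shot enable mechanism). */
static HV_Errno enable_ipi0(void)
{
    HV_Errno rc = hv_dev_register_intr_state(&ipi_state);
    if (rc == HV_OK)
        hv_enable_intr(&ipi_state, (HV_IntrMask)1 << 0);
    return rc;
}

/* On the sending tile: trigger IPI 0 on the tile at supervisor-relative (1, 0). */
static HV_Errno kick_neighbor(void)
{
    HV_Coord target = { .x = 1, .y = 0 };
    return hv_trigger_ipi(target, 0);   /* 0 <= HV_MAX_IPI_INTERRUPT */
}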
549 | |||
550 | /** Store memory mapping in debug memory so that external debugger can read it. | ||
551 | * A maximum of 16 entries can be stored. | ||
552 | * | ||
553 | * @param va VA of memory that is mapped. | ||
554 | * @param len Length of mapped memory. | ||
555 | * @param pa PA of memory that is mapped. | ||
556 | * @return 0 on success, -1 if the maximum number of mappings is exceeded. | ||
557 | */ | ||
558 | int hv_store_mapping(HV_VirtAddr va, unsigned int len, HV_PhysAddr pa); | ||
559 | |||
560 | /** Given a client PA and a length, return its real (HV) PA. | ||
561 | * | ||
562 | * @param cpa Client physical address. | ||
563 | * @param len Length of mapped memory. | ||
564 | * @return physical address, or -1 if cpa or len is not valid. | ||
565 | */ | ||
566 | HV_PhysAddr hv_inquire_realpa(HV_PhysAddr cpa, unsigned int len); | ||
567 | |||
568 | /** RTC return flag for no RTC chip present. | ||
569 | */ | ||
570 | #define HV_RTC_NO_CHIP 0x1 | ||
571 | |||
572 | /** RTC return flag for low-voltage condition, indicating that the battery | ||
573 | * has died and the time read is unreliable. | ||
574 | */ | ||
575 | #define HV_RTC_LOW_VOLTAGE 0x2 | ||
576 | |||
577 | /** Date/Time of day */ | ||
578 | typedef struct { | ||
579 | #if CHIP_WORD_SIZE() > 32 | ||
580 | __hv64 tm_sec; /**< Seconds, 0-59 */ | ||
581 | __hv64 tm_min; /**< Minutes, 0-59 */ | ||
582 | __hv64 tm_hour; /**< Hours, 0-23 */ | ||
583 | __hv64 tm_mday; /**< Day of month, 0-30 */ | ||
584 | __hv64 tm_mon; /**< Month, 0-11 */ | ||
585 | __hv64 tm_year; /**< Years since 1900, 0-199 */ | ||
586 | __hv64 flags; /**< Return flags, 0 if no error */ | ||
587 | #else | ||
588 | __hv32 tm_sec; /**< Seconds, 0-59 */ | ||
589 | __hv32 tm_min; /**< Minutes, 0-59 */ | ||
590 | __hv32 tm_hour; /**< Hours, 0-23 */ | ||
591 | __hv32 tm_mday; /**< Day of month, 0-30 */ | ||
592 | __hv32 tm_mon; /**< Month, 0-11 */ | ||
593 | __hv32 tm_year; /**< Years since 1900, 0-199 */ | ||
594 | __hv32 flags; /**< Return flags, 0 if no error */ | ||
595 | #endif | ||
596 | } HV_RTCTime; | ||
597 | |||
598 | /** Read the current time-of-day clock. | ||
599 | * @return HV_RTCTime of current time (GMT). | ||
600 | */ | ||
601 | HV_RTCTime hv_get_rtc(void); | ||
602 | |||
603 | |||
604 | /** Set the current time-of-day clock. | ||
605 | * @param time time to reset time-of-day to (GMT). | ||
606 | */ | ||
607 | void hv_set_rtc(HV_RTCTime time); | ||
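A small sketch of reading the RTC and interpreting the flags field; everything used here is declared above, and only the include path is assumed.

#include <hv/hypervisor.h>      /* assumed include path */

/* Read the RTC into *out; returns 1 if the time can be trusted, 0 if not. */
static int rtc_is_reliable(HV_RTCTime *out)
{
    HV_RTCTime t = hv_get_rtc();

    *out = t;
    if (t.flags & HV_RTC_NO_CHIP)
        return 0;               /* no RTC hardware present */
    if (t.flags & HV_RTC_LOW_VOLTAGE)
        return 0;               /* battery died; reading is unreliable */
    return 1;                   /* flags == 0: time (GMT) is usable */
}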
608 | |||
609 | /** Installs a context, comprising a page table and other attributes. | ||
610 | * | ||
611 | * Once this service completes, page_table will be used to translate | ||
612 | * subsequent virtual address references to physical memory. | ||
613 | * | ||
614 | * Installing a context does not cause an implicit TLB flush. Before | ||
615 | * reusing an ASID value for a different address space, the client is | ||
616 | * expected to flush old references from the TLB with hv_flush_asid(). | ||
617 | * (Alternately, hv_flush_all() may be used to flush many ASIDs at once.) | ||
618 | * After invalidating a page table entry, changing its attributes, or | ||
619 | * changing its target CPA, the client is expected to flush old references | ||
620 | * from the TLB with hv_flush_page() or hv_flush_pages(). Making a | ||
621 | * previously invalid page valid does not require a flush. | ||
622 | * | ||
623 | * Specifying an invalid ASID, or an invalid CPA (client physical address) | ||
624 | * (either as page_table, or within the referenced table), | ||
625 | * or another page table data item documented above as illegal, may | ||
626 | * lead to client termination; since the validation of the table is | ||
627 | * done as needed, this may happen before the service returns, or at | ||
628 | * some later time, or never, depending upon the client's pattern of | ||
629 | * memory references. Page table entries which supply translations for | ||
630 | * invalid virtual addresses may result in client termination, or may | ||
631 | * be silently ignored. "Invalid" in this context means a value which | ||
632 | * was not provided to the client via the appropriate hv_inquire_* routine. | ||
633 | * | ||
634 | * To support changing the instruction VAs at the same time as | ||
635 | * installing the new page table, this call explicitly supports | ||
636 | * setting the "lr" register to a different address and then jumping | ||
637 | * directly to the hv_install_context() routine. In this case, the | ||
638 | * new page table does not need to contain any mapping for the | ||
639 | * hv_install_context address itself. | ||
640 | * | ||
641 | * @param page_table Root of the page table. | ||
642 | * @param access PTE providing info on how to read the page table. This | ||
643 | * value must be consistent between multiple tiles sharing a page table, | ||
644 | * and must also be consistent with any virtual mappings the client | ||
645 | * may be using to access the page table. | ||
646 | * @param asid HV_ASID the page table is to be used for. | ||
647 | * @param flags Context flags, denoting attributes or privileges of the | ||
648 | * current context (HV_CTX_xxx). | ||
649 | * @return Zero on success, or a hypervisor error code on failure. | ||
650 | */ | ||
651 | int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid, | ||
652 | __hv32 flags); | ||
653 | |||
654 | #endif /* !__ASSEMBLER__ */ | ||
655 | |||
656 | #define HV_CTX_DIRECTIO 0x1 /**< Direct I/O requests are accepted from | ||
657 | PL0. */ | ||
658 | |||
659 | #ifndef __ASSEMBLER__ | ||
660 | |||
661 | /** Value returned from hv_inquire_context(). */ | ||
662 | typedef struct | ||
663 | { | ||
664 | /** Physical address of page table */ | ||
665 | HV_PhysAddr page_table; | ||
666 | |||
667 | /** PTE which defines access method for top of page table */ | ||
668 | HV_PTE access; | ||
669 | |||
670 | /** ASID associated with this page table */ | ||
671 | HV_ASID asid; | ||
672 | |||
673 | /** Context flags */ | ||
674 | __hv32 flags; | ||
675 | } HV_Context; | ||
676 | |||
677 | /** Retrieve information about the currently installed context. | ||
678 | * @return The data passed to the last successful hv_install_context call. | ||
679 | */ | ||
680 | HV_Context hv_inquire_context(void); | ||
681 | |||
682 | |||
683 | /** Flushes all translations associated with the named address space | ||
684 | * identifier from the TLB and any other hypervisor data structures. | ||
685 | * Translations installed with the "global" bit are not flushed. | ||
686 | * | ||
687 | * Specifying an invalid ASID may lead to client termination. "Invalid" | ||
688 | * in this context means a value which was not provided to the client | ||
689 | * via <tt>hv_inquire_asid()</tt>. | ||
690 | * | ||
691 | * @param asid HV_ASID whose entries are to be flushed. | ||
692 | * @return Zero on success, or a hypervisor error code on failure. | ||
693 | */ | ||
694 | int hv_flush_asid(HV_ASID asid); | ||
695 | |||
696 | |||
697 | /** Flushes all translations associated with the named virtual address | ||
698 | * and page size from the TLB and other hypervisor data structures. Only | ||
699 | * pages visible to the current ASID are affected; note that this includes | ||
700 | * global pages in addition to pages specific to the current ASID. | ||
701 | * | ||
702 | * The supplied VA need not be aligned; it may be anywhere in the | ||
703 | * subject page. | ||
704 | * | ||
705 | * Specifying an invalid virtual address may lead to client termination, | ||
706 | * or may silently succeed. "Invalid" in this context means a value | ||
707 | * which was not provided to the client via hv_inquire_virtual. | ||
708 | * | ||
709 | * @param address Address of the page to flush. | ||
710 | * @param page_size Size of pages to assume. | ||
711 | * @return Zero on success, or a hypervisor error code on failure. | ||
712 | */ | ||
713 | int hv_flush_page(HV_VirtAddr address, HV_PageSize page_size); | ||
714 | |||
715 | |||
716 | /** Flushes all translations associated with the named virtual address range | ||
717 | * and page size from the TLB and other hypervisor data structures. Only | ||
718 | * pages visible to the current ASID are affected; note that this includes | ||
719 | * global pages in addition to pages specific to the current ASID. | ||
720 | * | ||
721 | * The supplied VA need not be aligned; it may be anywhere in the | ||
722 | * subject page. | ||
723 | * | ||
724 | * Specifying an invalid virtual address may lead to client termination, | ||
725 | * or may silently succeed. "Invalid" in this context means a value | ||
726 | * which was not provided to the client via hv_inquire_virtual. | ||
727 | * | ||
728 | * @param start Address to flush. | ||
729 | * @param page_size Size of pages to assume. | ||
730 | * @param size The number of bytes to flush. Any page in the range | ||
731 | * [start, start + size) will be flushed from the TLB. | ||
732 | * @return Zero on success, or a hypervisor error code on failure. | ||
733 | */ | ||
734 | int hv_flush_pages(HV_VirtAddr start, HV_PageSize page_size, | ||
735 | unsigned long size); | ||
736 | |||
737 | |||
738 | /** Flushes all non-global translations (if preserve_global is true), | ||
739 | * or absolutely all translations (if preserve_global is false). | ||
740 | * | ||
741 | * @param preserve_global Non-zero if we want to preserve "global" mappings. | ||
742 | * @return Zero on success, or a hypervisor error code on failure. | ||
743 | */ | ||
744 | int hv_flush_all(int preserve_global); | ||
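Sketch of the flush discipline described above when reusing an ASID: flush the ASID's stale translations, then install the new context. The page table address, access PTE and ASID are assumed to have been prepared by the caller (PTE encoding is covered elsewhere in this header).

#include <hv/hypervisor.h>      /* assumed include path */

/* Switch this tile to a new address space that reuses an existing ASID. */
static int switch_address_space(HV_PhysAddr new_pt, HV_PTE access, HV_ASID asid)
{
    int rc;

    /* hv_install_context() does not flush the TLB, so evict any stale
     * translations tagged with this ASID first. */
    rc = hv_flush_asid(asid);
    if (rc != 0)
        return rc;

    return hv_install_context(new_pt, access, asid, 0 /* no HV_CTX_xxx flags */);
}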
745 | |||
746 | |||
747 | /** Restart machine with optional restart command and optional args. | ||
748 | * @param cmd Const pointer to command to restart with, or NULL | ||
749 | * @param args Const pointer to argument string to restart with, or NULL | ||
750 | */ | ||
751 | void hv_restart(HV_VirtAddr cmd, HV_VirtAddr args); | ||
752 | |||
753 | |||
754 | /** Halt machine. */ | ||
755 | void hv_halt(void); | ||
756 | |||
757 | |||
758 | /** Power off machine. */ | ||
759 | void hv_power_off(void); | ||
760 | |||
761 | |||
762 | /** Re-enter virtual-is-physical memory translation mode and restart | ||
763 | * execution at a given address. | ||
764 | * @param entry Client physical address at which to begin execution. | ||
765 | * @return A hypervisor error code on failure; if the operation is | ||
766 | * successful the call does not return. | ||
767 | */ | ||
768 | int hv_reexec(HV_PhysAddr entry); | ||
769 | |||
770 | |||
771 | /** Chip topology */ | ||
772 | typedef struct | ||
773 | { | ||
774 | /** Relative coordinates of the querying tile */ | ||
775 | HV_Coord coord; | ||
776 | |||
777 | /** Width of the querying supervisor's tile rectangle. */ | ||
778 | int width; | ||
779 | |||
780 | /** Height of the querying supervisor's tile rectangle. */ | ||
781 | int height; | ||
782 | |||
783 | } HV_Topology; | ||
784 | |||
785 | /** Returns information about the tile coordinate system. | ||
786 | * | ||
787 | * Each supervisor is given a rectangle of tiles it potentially controls. | ||
788 | * These tiles are labeled using a relative coordinate system with (0,0) as | ||
789 | * the upper left tile regardless of their physical location on the chip. | ||
790 | * | ||
791 | * This call returns both the size of that rectangle and the position | ||
792 | * within that rectangle of the querying tile. | ||
793 | * | ||
794 | * Not all tiles within that rectangle may be available to the supervisor; | ||
795 | * to get the precise set of available tiles, you must also call | ||
796 | * hv_inquire_tiles(HV_INQ_TILES_AVAIL, ...). | ||
797 | **/ | ||
798 | HV_Topology hv_inquire_topology(void); | ||
799 | |||
800 | /** Sets of tiles we can retrieve with hv_inquire_tiles(). | ||
801 | * | ||
802 | * These numbers are part of the binary API and guaranteed not to change. | ||
803 | */ | ||
804 | typedef enum { | ||
805 | /** An invalid value; do not use. */ | ||
806 | _HV_INQ_TILES_RESERVED = 0, | ||
807 | |||
808 | /** All available tiles within the supervisor's tile rectangle. */ | ||
809 | HV_INQ_TILES_AVAIL = 1, | ||
810 | |||
811 | /** The set of tiles used for hash-for-home caching. */ | ||
812 | HV_INQ_TILES_HFH_CACHE = 2, | ||
813 | |||
814 | /** The set of tiles that can be legally used as a LOTAR for a PTE. */ | ||
815 | HV_INQ_TILES_LOTAR = 3 | ||
816 | } HV_InqTileSet; | ||
817 | |||
818 | /** Returns specific information about various sets of tiles within the | ||
819 | * supervisor's tile rectangle. | ||
820 | * | ||
821 | * @param set Which set of tiles to retrieve. | ||
822 | * @param cpumask Pointer to a returned bitmask (in row-major order, | ||
823 | * supervisor-relative) of tiles. The low bit of the first word | ||
824 | * corresponds to the tile at the upper left-hand corner of the | ||
825 | * supervisor's rectangle. In order for the supervisor to know the | ||
826 | * buffer length to supply, it should first call hv_inquire_topology. | ||
827 | * @param length Number of bytes available for the returned bitmask. | ||
828 | **/ | ||
829 | HV_Errno hv_inquire_tiles(HV_InqTileSet set, HV_VirtAddr cpumask, int length); | ||
830 | |||
831 | |||
832 | /** An identifier for a memory controller. Multiple memory controllers | ||
833 | * may be connected to one chip, and this uniquely identifies each one. | ||
834 | */ | ||
835 | typedef int HV_MemoryController; | ||
836 | |||
837 | /** A range of physical memory. */ | ||
838 | typedef struct | ||
839 | { | ||
840 | HV_PhysAddr start; /**< Starting address. */ | ||
841 | __hv64 size; /**< Size in bytes. */ | ||
842 | HV_MemoryController controller; /**< Which memory controller owns this. */ | ||
843 | } HV_PhysAddrRange; | ||
844 | |||
845 | /** Returns information about a range of physical memory. | ||
846 | * | ||
847 | * hv_inquire_physical() returns one of the ranges of client | ||
848 | * physical addresses which are available to this client. | ||
849 | * | ||
850 | * The first range is retrieved by specifying an idx of 0, and | ||
851 | * successive ranges are returned with subsequent idx values. Ranges | ||
852 | * are ordered by increasing start address (i.e., as idx increases, | ||
853 | * so does start), do not overlap, and do not touch (i.e., the | ||
854 | * available memory is described with the fewest possible ranges). | ||
855 | * | ||
856 | * If an out-of-range idx value is specified, the returned size will be zero. | ||
857 | * A client can count the number of ranges by increasing idx until the | ||
858 | * returned size is zero. There will always be at least one valid range. | ||
859 | * | ||
860 | * Some clients might not be prepared to deal with more than one | ||
861 | * physical address range; they still ought to call this routine and | ||
862 | * issue a warning message if they're given more than one range, on the | ||
863 | * theory that whoever configured the hypervisor to provide that memory | ||
864 | * should know that it's being wasted. | ||
865 | */ | ||
866 | HV_PhysAddrRange hv_inquire_physical(int idx); | ||
867 | |||
868 | |||
869 | /** Memory controller information. */ | ||
870 | typedef struct | ||
871 | { | ||
872 | HV_Coord coord; /**< Relative tile coordinates of the port used by a | ||
873 | specified tile to communicate with this controller. */ | ||
874 | __hv64 speed; /**< Speed of this controller in bytes per second. */ | ||
875 | } HV_MemoryControllerInfo; | ||
876 | |||
877 | /** Returns information about a particular memory controller. | ||
878 | * | ||
879 | * hv_inquire_memory_controller(coord,idx) returns information about a | ||
880 | * particular controller. Two pieces of information are returned: | ||
881 | * - The relative coordinates of the port on the controller that the specified | ||
882 | * tile would use to contact it. The relative coordinates may lie | ||
883 | * outside the supervisor's rectangle, i.e. the controller may not | ||
884 | * be attached to a node managed by the querying node's supervisor. | ||
885 | * In particular note that x or y may be negative. | ||
886 | * - The speed of the memory controller. (This is a not-to-exceed value | ||
887 | * based on the raw hardware data rate, and may not be achievable in | ||
888 | * practice; it is provided to give clients information on the relative | ||
889 | * performance of the available controllers.) | ||
890 | * | ||
891 | * Clients should avoid calling this interface with invalid values. | ||
892 | * A client who does may be terminated. | ||
893 | * @param coord Tile for which to calculate the relative port position. | ||
894 | * @param controller Index of the controller; identical to value returned | ||
895 | * from other routines like hv_inquire_physical. | ||
896 | * @return Information about the controller. | ||
897 | */ | ||
898 | HV_MemoryControllerInfo hv_inquire_memory_controller(HV_Coord coord, | ||
899 | int controller); | ||
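A sketch of the enumeration pattern described above: walk the client physical ranges until a zero-sized range is returned, and look up the controller serving each one. What the client does with the results is left as a comment.

#include <hv/hypervisor.h>      /* assumed include path */

/* Enumerate physical memory ranges and their controllers. */
static void scan_memory(void)
{
    HV_Topology topo = hv_inquire_topology();
    int idx;

    for (idx = 0; ; idx++) {
        HV_PhysAddrRange r = hv_inquire_physical(idx);
        HV_MemoryControllerInfo mc;

        if (r.size == 0)
            break;              /* past the last range */

        mc = hv_inquire_memory_controller(topo.coord, r.controller);
        /* [r.start, r.start + r.size) is served by the controller reached
         * through the port at mc.coord, rated at mc.speed bytes/second. */
        (void)mc;
    }
}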
900 | |||
901 | |||
902 | /** A range of virtual memory. */ | ||
903 | typedef struct | ||
904 | { | ||
905 | HV_VirtAddr start; /**< Starting address. */ | ||
906 | __hv64 size; /**< Size in bytes. */ | ||
907 | } HV_VirtAddrRange; | ||
908 | |||
909 | /** Returns information about a range of virtual memory. | ||
910 | * | ||
911 | * hv_inquire_virtual() returns one of the ranges of client | ||
912 | * virtual addresses which are available to this client. | ||
913 | * | ||
914 | * The first range is retrieved by specifying an idx of 0, and | ||
915 | * successive ranges are returned with subsequent idx values. Ranges | ||
916 | * are ordered by increasing start address (i.e., as idx increases, | ||
917 | * so does start), do not overlap, and do not touch (i.e., the | ||
918 | * available memory is described with the fewest possible ranges). | ||
919 | * | ||
920 | * If an out-of-range idx value is specified, the returned size will be zero. | ||
921 | * A client can count the number of ranges by increasing idx until the | ||
922 | * returned size is zero. There will always be at least one valid range. | ||
923 | * | ||
924 | * Some clients may well have various virtual addresses hardwired | ||
925 | * into themselves; for instance, their instruction stream may | ||
926 | * have been compiled expecting to live at a particular address. | ||
927 | * Such clients should use this interface to verify they've been | ||
928 | * given the virtual address space they expect, and issue a (potentially | ||
929 | * fatal) warning message otherwise. | ||
930 | * | ||
931 | * Note that the returned size is a __hv64, not a __hv32, so it is | ||
932 | * possible to express a single range spanning the entire 32-bit | ||
933 | * address space. | ||
934 | */ | ||
935 | HV_VirtAddrRange hv_inquire_virtual(int idx); | ||
936 | |||
937 | |||
938 | /** A range of ASID values. */ | ||
939 | typedef struct | ||
940 | { | ||
941 | HV_ASID start; /**< First ASID in the range. */ | ||
942 | unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */ | ||
943 | } HV_ASIDRange; | ||
944 | |||
945 | /** Returns information about a range of ASIDs. | ||
946 | * | ||
947 | * hv_inquire_asid() returns one of the ranges of address | ||
948 | * space identifiers which are available to this client. | ||
949 | * | ||
950 | * The first range is retrieved by specifying an idx of 0, and | ||
951 | * successive ranges are returned with subsequent idx values. Ranges | ||
952 | * are ordered by increasing start value (i.e., as idx increases, | ||
953 | * so does start), do not overlap, and do not touch (i.e., the | ||
954 | * available ASIDs are described with the fewest possible ranges). | ||
955 | * | ||
956 | * If an out-of-range idx value is specified, the returned size will be zero. | ||
957 | * A client can count the number of ranges by increasing idx until the | ||
958 | * returned size is zero. There will always be at least one valid range. | ||
959 | */ | ||
960 | HV_ASIDRange hv_inquire_asid(int idx); | ||
961 | |||
962 | |||
963 | /** Waits for at least the specified number of nanoseconds then returns. | ||
964 | * | ||
965 | * @param nanosecs The number of nanoseconds to sleep. | ||
966 | */ | ||
967 | void hv_nanosleep(int nanosecs); | ||
968 | |||
969 | |||
970 | /** Reads a character from the console without blocking. | ||
971 | * | ||
972 | * @return A value from 0-255 indicating the character successfully read. | ||
973 | * A negative value means no character was ready. | ||
974 | */ | ||
975 | int hv_console_read_if_ready(void); | ||
976 | |||
977 | |||
978 | /** Writes a character to the console, blocking if the console is busy. | ||
979 | * | ||
980 | * This call cannot fail. If the console is broken for some reason, | ||
981 | * output will simply vanish. | ||
982 | * @param byte Character to write. | ||
983 | */ | ||
984 | void hv_console_putc(int byte); | ||
985 | |||
986 | |||
987 | /** Writes a string to the console, blocking if the console is busy. | ||
988 | * @param bytes Pointer to characters to write. | ||
989 | * @param len Number of characters to write. | ||
990 | * @return Number of characters written, or HV_EFAULT if the buffer is invalid. | ||
991 | */ | ||
992 | int hv_console_write(HV_VirtAddr bytes, int len); | ||
993 | |||
994 | |||
995 | /** Dispatch the next interrupt from the client downcall mechanism. | ||
996 | * | ||
997 | * The hypervisor uses downcalls to notify the client of asynchronous | ||
998 | * events. Some of these events are hypervisor-created (like incoming | ||
999 | * messages). Some are regular interrupts which initially occur in | ||
1000 | * the hypervisor, and are normally handled directly by the client; | ||
1001 | * when these occur in a client's interrupt critical section, they must | ||
1002 | * be delivered through the downcall mechanism. | ||
1003 | * | ||
1004 | * A downcall is initially delivered to the client as an INTCTRL_1 | ||
1005 | * interrupt. Upon entry to the INTCTRL_1 vector, the client must | ||
1006 | * immediately invoke the hv_downcall_dispatch service. This service | ||
1007 | * will not return; instead it will cause one of the client's actual | ||
1008 | * downcall-handling interrupt vectors to be entered. The EX_CONTEXT | ||
1009 | * registers in the client will be set so that when the client irets, | ||
1010 | * it will return to the code which was interrupted by the INTCTRL_1 | ||
1011 | * interrupt. | ||
1012 | * | ||
1013 | * Any saving of registers should be done by the actual handling | ||
1014 | * vectors; no registers should be changed by the INTCTRL_1 handler. | ||
1015 | * In particular, the client should not use a jal instruction to invoke | ||
1016 | * the hv_downcall_dispatch service, as that would overwrite the client's | ||
1017 | * lr register. Note that the hv_downcall_dispatch service may overwrite | ||
1018 | * one or more of the client's system save registers. | ||
1019 | * | ||
1020 | * The client must not modify the INTCTRL_1_STATUS SPR. The hypervisor | ||
1021 | * will set this register to cause a downcall to happen, and will clear | ||
1022 | * it when no further downcalls are pending. | ||
1023 | * | ||
1024 | * When a downcall vector is entered, the INTCTRL_1 interrupt will be | ||
1025 | * masked. When the client is done processing a downcall, and is ready | ||
1026 | * to accept another, it must unmask this interrupt; if more downcalls | ||
1027 | * are pending, this will cause the INTCTRL_1 vector to be reentered. | ||
1028 | * Currently the following interrupt vectors can be entered through a | ||
1029 | * downcall: | ||
1030 | * | ||
1031 | * INT_MESSAGE_RCV_DWNCL (hypervisor message available) | ||
1032 | * INT_DMATLB_MISS_DWNCL (DMA TLB miss) | ||
1033 | * INT_SNITLB_MISS_DWNCL (SNI TLB miss) | ||
1034 | * INT_DMATLB_ACCESS_DWNCL (DMA TLB access violation) | ||
1035 | */ | ||
1036 | void hv_downcall_dispatch(void); | ||
1037 | |||
1038 | #endif /* !__ASSEMBLER__ */ | ||
1039 | |||
1040 | /** We use actual interrupt vectors which never occur (they're only there | ||
1041 | * to allow setting MPLs for related SPRs) for our downcall vectors. | ||
1042 | */ | ||
1043 | /** Message receive downcall interrupt vector */ | ||
1044 | #define INT_MESSAGE_RCV_DWNCL INT_BOOT_ACCESS | ||
1045 | /** DMA TLB miss downcall interrupt vector */ | ||
1046 | #define INT_DMATLB_MISS_DWNCL INT_DMA_ASID | ||
1047 | /** Static network processor instruction TLB miss interrupt vector */ | ||
1048 | #define INT_SNITLB_MISS_DWNCL INT_SNI_ASID | ||
1049 | /** DMA TLB access violation downcall interrupt vector */ | ||
1050 | #define INT_DMATLB_ACCESS_DWNCL INT_DMA_CPL | ||
1051 | /** Device interrupt downcall interrupt vector */ | ||
1052 | #define INT_DEV_INTR_DWNCL INT_WORLD_ACCESS | ||
1053 | |||
1054 | #ifndef __ASSEMBLER__ | ||
1055 | |||
1056 | /** Requests the inode for a specific full pathname. | ||
1057 | * | ||
1058 | * Performs a lookup in the hypervisor filesystem for a given filename. | ||
1059 | * Multiple calls with the same filename will always return the same inode. | ||
1060 | * If there is no such filename, HV_ENOENT is returned. | ||
1061 | * A bad filename pointer may result in HV_EFAULT instead. | ||
1062 | * | ||
1063 | * @param filename Constant pointer to name of requested file | ||
1064 | * @return Inode of requested file | ||
1065 | */ | ||
1066 | int hv_fs_findfile(HV_VirtAddr filename); | ||
1067 | |||
1068 | |||
1069 | /** Data returned from an fstat request. | ||
1070 | * Note that this structure should be no more than 40 bytes in size so | ||
1071 | * that it can always be returned completely in registers. | ||
1072 | */ | ||
1073 | typedef struct | ||
1074 | { | ||
1075 | int size; /**< Size of file (or HV_Errno on error) */ | ||
1076 | unsigned int flags; /**< Flags (see HV_FS_FSTAT_FLAGS) */ | ||
1077 | } HV_FS_StatInfo; | ||
1078 | |||
1079 | /** Bitmask flags for fstat request */ | ||
1080 | typedef enum | ||
1081 | { | ||
1082 | HV_FS_ISDIR = 0x0001 /**< Is the entry a directory? */ | ||
1083 | } HV_FS_FSTAT_FLAGS; | ||
1084 | |||
1085 | /** Get stat information on a given file inode. | ||
1086 | * | ||
1087 | * Return information on the file with the given inode. | ||
1088 | * | ||
1089 | * If the HV_FS_ISDIR bit is set, the "file" is a directory. Reading | ||
1090 | * it will return NUL-separated filenames (no directory part) relative | ||
1091 | * to the path to the inode of the directory "file". These can be | ||
1092 | * appended to the path to the directory "file" after a forward slash | ||
1093 | * to create additional filenames. Note that it is not required | ||
1094 | * that all valid paths be decomposable into valid parent directories; | ||
1095 | * a filesystem may validly have just a few files, none of which have | ||
1096 | * HV_FS_ISDIR set. However, if clients wish to enumerate the | ||
1097 | * files in the filesystem, it is recommended to include all the | ||
1098 | * appropriate parent directory "files" to give a consistent view. | ||
1099 | * | ||
1100 | * An invalid file inode will cause an HV_EBADF error to be returned. | ||
1101 | * | ||
1102 | * @param inode The inode number of the query | ||
1103 | * @return An HV_FS_StatInfo structure | ||
1104 | */ | ||
1105 | HV_FS_StatInfo hv_fs_fstat(int inode); | ||
1106 | |||
1107 | |||
1108 | /** Read data from a specific hypervisor file. | ||
1109 | * On error, may return HV_EBADF for a bad inode or HV_EFAULT for a bad buf. | ||
1110 | * Reads near the end of the file will return fewer bytes than requested. | ||
1111 | * Reads at or beyond the end of a file will return zero. | ||
1112 | * | ||
1113 | * @param inode the hypervisor file to read | ||
1114 | * @param buf the buffer to read data into | ||
1115 | * @param length the number of bytes of data to read | ||
1116 | * @param offset the offset into the file to read the data from | ||
1117 | * @return number of bytes successfully read, or an HV_Errno code | ||
1118 | */ | ||
1119 | int hv_fs_pread(int inode, HV_VirtAddr buf, int length, int offset); | ||
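Sketch of the whole hypervisor-filesystem flow: look up an inode, stat it, then read it in a loop. The negative-error convention for HV_Errno values and the pointer casts are assumptions; path, buf and bufsize come from the caller.

#include <hv/hypervisor.h>      /* assumed include path */

/* Read a regular file from the hypervisor filesystem into buf.
 * Returns the byte count on success or a negative error. */
static int read_hvfs_file(const char *path, char *buf, int bufsize)
{
    int inode, got, off = 0;
    HV_FS_StatInfo st;

    inode = hv_fs_findfile((HV_VirtAddr)(unsigned long)path);
    if (inode < 0)
        return inode;           /* e.g. HV_ENOENT or HV_EFAULT */

    st = hv_fs_fstat(inode);
    if (st.size < 0)
        return st.size;         /* HV_Errno from the fstat */
    if (st.flags & HV_FS_ISDIR)
        return -1;              /* caller wanted a regular file */

    while (off < st.size && off < bufsize) {
        got = hv_fs_pread(inode, (HV_VirtAddr)(unsigned long)(buf + off),
                          bufsize - off, off);
        if (got <= 0)
            return got < 0 ? got : off;   /* error, or unexpected EOF */
        off += got;
    }
    return off;                 /* bytes actually read */
}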
1120 | |||
1121 | |||
1122 | /** Read a 64-bit word from the specified physical address. | ||
1123 | * The address must be 8-byte aligned. | ||
1124 | * Specifying an invalid physical address will lead to client termination. | ||
1125 | * @param addr The physical address to read | ||
1126 | * @param access The PTE describing how to read the memory | ||
1127 | * @return The 64-bit value read from the given address | ||
1128 | */ | ||
1129 | unsigned long long hv_physaddr_read64(HV_PhysAddr addr, HV_PTE access); | ||
1130 | |||
1131 | |||
1132 | /** Write a 64-bit word to the specified physical address. | ||
1133 | * The address must be 8-byte aligned. | ||
1134 | * Specifying an invalid physical address will lead to client termination. | ||
1135 | * @param addr The physical address to write | ||
1136 | * @param access The PTE that says how to write the memory | ||
1137 | * @param val The 64-bit value to write to the given address | ||
1138 | */ | ||
1139 | void hv_physaddr_write64(HV_PhysAddr addr, HV_PTE access, | ||
1140 | unsigned long long val); | ||
1141 | |||
1142 | |||
1143 | /** Get the value of the command-line for the supervisor, if any. | ||
1144 | * This will not include the filename of the booted supervisor, but may | ||
1145 | * include configured-in boot arguments or the hv_restart() arguments. | ||
1146 | * If the buffer is not long enough the hypervisor will NUL the first | ||
1147 | * character of the buffer but not write any other data. | ||
1148 | * @param buf The virtual address to write the command-line string to. | ||
1149 | * @param length The length of buf, in characters. | ||
1150 | * @return The actual length of the command line, including the trailing NUL | ||
1151 | * (may be larger than "length"). | ||
1152 | */ | ||
1153 | int hv_get_command_line(HV_VirtAddr buf, int length); | ||
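A short sketch of the truncation semantics spelled out above: the return value is the real length including the NUL, and an undersized buffer only gets its first byte NUL'ed.

#include <hv/hypervisor.h>      /* assumed include path */

/* Fetch the boot command line; returns 1 if it fit in buf, 0 if it did not. */
static int copy_command_line(char *buf, int buflen)
{
    int full = hv_get_command_line((HV_VirtAddr)(unsigned long)buf, buflen);

    /* If full > buflen the hypervisor wrote only buf[0] = '\0'; the caller
     * would need a buffer of at least 'full' characters to get the rest. */
    return full <= buflen;
}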
1154 | |||
1155 | |||
1156 | /** Set a new value for the command-line for the supervisor, which will | ||
1157 | * be returned from subsequent invocations of hv_get_command_line() on | ||
1158 | * this tile. | ||
1159 | * @param buf The virtual address to read the command-line string from. | ||
1160 | * @param length The length of buf, in characters; must be no more than | ||
1161 | * HV_COMMAND_LINE_LEN. | ||
1162 | * @return Zero if successful, or a hypervisor error code. | ||
1163 | */ | ||
1164 | HV_Errno hv_set_command_line(HV_VirtAddr buf, int length); | ||
1165 | |||
1166 | /** Maximum size of a command line passed to hv_set_command_line(); note | ||
1167 | * that a line returned from hv_get_command_line() could be larger than | ||
1168 | * this.*/ | ||
1169 | #define HV_COMMAND_LINE_LEN 256 | ||
1170 | |||
1171 | /** Tell the hypervisor how to cache non-priority pages | ||
1172 | * (its own as well as pages explicitly represented in page tables). | ||
1173 | * Normally these will be represented as red/black pages, but | ||
1174 | * when the supervisor starts to allocate "priority" pages in the PTE | ||
1175 | * the hypervisor will need to start marking those pages as (e.g.) "red" | ||
1176 | * and non-priority pages as either "black" (if they cache-alias | ||
1177 | * with the existing priority pages) or "red/black" (if they don't). | ||
1178 | * The bitmask provides information on which parts of the cache | ||
1179 | * have been used for pinned pages so far on this tile; if (1 << N) | ||
1180 | * appears in the bitmask, that indicates that a page has been marked | ||
1181 | * "priority" whose PFN equals N, mod 8. | ||
1182 | * @param bitmask A bitmap of priority page set values | ||
1183 | */ | ||
1184 | void hv_set_caching(unsigned int bitmask); | ||
1185 | |||
1186 | |||
1187 | /** Zero out a specified number of pages. | ||
1188 | * The va and size must both be multiples of 4096. | ||
1189 | * Caches are bypassed and memory is directly set to zero. | ||
1190 | * This API is implemented only in the magic hypervisor and is intended | ||
1191 | * to provide a performance boost to the minimal supervisor by | ||
1192 | * giving it a fast way to zero memory pages when allocating them. | ||
1193 | * @param va Virtual address where the page has been mapped | ||
1194 | * @param size Number of bytes (must be a page size multiple) | ||
1195 | */ | ||
1196 | void hv_bzero_page(HV_VirtAddr va, unsigned int size); | ||
1197 | |||
1198 | |||
1199 | /** State object for the hypervisor messaging subsystem. */ | ||
1200 | typedef struct | ||
1201 | { | ||
1202 | #if CHIP_VA_WIDTH() > 32 | ||
1203 | __hv64 opaque[2]; /**< No user-serviceable parts inside */ | ||
1204 | #else | ||
1205 | __hv32 opaque[2]; /**< No user-serviceable parts inside */ | ||
1206 | #endif | ||
1207 | } | ||
1208 | HV_MsgState; | ||
1209 | |||
1210 | /** Register to receive incoming messages. | ||
1211 | * | ||
1212 | * This routine configures the current tile so that it can receive | ||
1213 | * incoming messages. It must be called before the client can receive | ||
1214 | * messages with the hv_receive_message routine, and must be called on | ||
1215 | * each tile which will receive messages. | ||
1216 | * | ||
1217 | * msgstate is the virtual address of a state object of type HV_MsgState. | ||
1218 | * Once the state is registered, the client must not read or write the | ||
1219 | * state object; doing so will cause undefined results. | ||
1220 | * | ||
1221 | * If this routine is called with msgstate set to 0, the client's message | ||
1222 | * state will be freed and it will no longer be able to receive messages. | ||
1223 | * Note that this may cause the loss of any as-yet-undelivered messages | ||
1224 | * for the client. | ||
1225 | * | ||
1226 | * If another client attempts to send a message to a client which has | ||
1227 | * not yet called hv_register_message_state, or which has freed its | ||
1228 | * message state, the message will not be delivered, as if the client | ||
1229 | * had insufficient buffering. | ||
1230 | * | ||
1231 | * This routine returns HV_OK if the registration was successful, and | ||
1232 | * HV_EINVAL if the supplied state object is unsuitable. Note that some | ||
1233 | * errors may not be detected during this routine, but might be detected | ||
1234 | * during a subsequent message delivery. | ||
1235 | * @param msgstate State object. | ||
1236 | **/ | ||
1237 | HV_Errno hv_register_message_state(HV_MsgState* msgstate); | ||
1238 | |||
1239 | /** Possible message recipient states. */ | ||
1240 | typedef enum | ||
1241 | { | ||
1242 | HV_TO_BE_SENT, /**< Not sent (not attempted, or recipient not ready) */ | ||
1243 | HV_SENT, /**< Successfully sent */ | ||
1244 | HV_BAD_RECIP /**< Bad recipient coordinates (permanent error) */ | ||
1245 | } HV_Recip_State; | ||
1246 | |||
1247 | /** Message recipient. */ | ||
1248 | typedef struct | ||
1249 | { | ||
1250 | /** X coordinate, relative to supervisor's top-left coordinate */ | ||
1251 | unsigned int x:11; | ||
1252 | |||
1253 | /** Y coordinate, relative to supervisor's top-left coordinate */ | ||
1254 | unsigned int y:11; | ||
1255 | |||
1256 | /** Status of this recipient */ | ||
1257 | HV_Recip_State state:10; | ||
1258 | } HV_Recipient; | ||
1259 | |||
1260 | /** Send a message to a set of recipients. | ||
1261 | * | ||
1262 | * This routine sends a message to a set of recipients. | ||
1263 | * | ||
1264 | * recips is an array of HV_Recipient structures. Each specifies a tile, | ||
1265 | * and a message state; initially, it is expected that the state will | ||
1266 | * be set to HV_TO_BE_SENT. nrecip specifies the number of recipients | ||
1267 | * in the recips array. | ||
1268 | * | ||
1269 | * For each recipient whose state is HV_TO_BE_SENT, the hypervisor attempts | ||
1270 | * to send that tile the specified message. In order to successfully | ||
1271 | * receive the message, the receiver must be a valid tile to which the | ||
1272 | * sender has access, must not be the sending tile itself, and must have | ||
1273 | * sufficient free buffer space. (The hypervisor guarantees that each | ||
1274 | * tile which has called hv_register_message_state() will be able to | ||
1275 | * buffer one message from every other tile which can legally send to it; | ||
1276 | * more space may be provided but is not guaranteed.) If an invalid tile | ||
1277 | * is specified, the recipient's state is set to HV_BAD_RECIP; this is a | ||
1278 | * permanent delivery error. If the message is successfully delivered | ||
1279 | * to the recipient's buffer, the recipient's state is set to HV_SENT. | ||
1280 | * Otherwise, the recipient's state is unchanged. Message delivery is | ||
1281 | * synchronous; all attempts to send messages are completed before this | ||
1282 | * routine returns. | ||
1283 | * | ||
1284 | * If no permanent delivery errors were encountered, the routine returns | ||
1285 | * the number of messages successfully sent: that is, the number of | ||
1286 | * recipients whose states changed from HV_TO_BE_SENT to HV_SENT during | ||
1287 | * this operation. If any permanent delivery errors were encountered, | ||
1288 | * the routine returns HV_ERECIP. In the event of permanent delivery | ||
1289 | * errors, it may be the case that delivery was not attempted to all | ||
1290 | * recipients; if any messages were successfully delivered, however, | ||
1291 | * recipients' state values will be updated appropriately. | ||
1292 | * | ||
1293 | * It is explicitly legal to specify a recipient structure whose state | ||
1294 | * is not HV_TO_BE_SENT; such a recipient is ignored. One suggested way | ||
1295 | * of using hv_send_message to send a message to multiple tiles is to set | ||
1296 | * up a list of recipients, and then call the routine repeatedly with the | ||
1297 | * same list, each time accumulating the number of messages successfully | ||
1298 | * sent, until all messages are sent, a permanent error is encountered, | ||
1299 | * or the desired number of attempts have been made. When used in this | ||
1300 | * way, the routine will deliver each message no more than once to each | ||
1301 | * recipient. | ||
1302 | * | ||
1303 | * Note that a message being successfully delivered to the recipient's | ||
1304 | * buffer space does not guarantee that it is received by the recipient, | ||
1305 | * either immediately or at any time in the future; the recipient might | ||
1306 | * never call hv_receive_message, or could register a different state | ||
1307 | * buffer, losing the message. | ||
1308 | * | ||
1309 | * Specifying the same recipient more than once in the recipient list | ||
1310 | * is an error, which will not result in an error return but which may | ||
1311 | * or may not result in more than one message being delivered to the | ||
1312 | * recipient tile. | ||
1313 | * | ||
1314 | * buf and buflen specify the message to be sent. buf is a virtual address | ||
1315 | * which must be currently mapped in the client's page table; if not, the | ||
1316 | * routine returns HV_EFAULT. buflen must be greater than zero and less | ||
1317 | * than or equal to HV_MAX_MESSAGE_SIZE, and nrecip must be less than the | ||
1318 | * number of tiles to which the sender has access; if not, the routine | ||
1319 | * returns HV_EINVAL. | ||
1320 | * @param recips List of recipients. | ||
1321 | * @param nrecip Number of recipients. | ||
1322 | * @param buf Address of message data. | ||
1323 | * @param buflen Length of message data. | ||
1324 | **/ | ||
1325 | int hv_send_message(HV_Recipient *recips, int nrecip, | ||
1326 | HV_VirtAddr buf, int buflen); | ||
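A sketch of the retry pattern suggested above: keep calling hv_send_message() with the same recipient list until every entry has moved out of HV_TO_BE_SENT or a permanent error occurs. MAX_SEND_TRIES is an arbitrary cap added for the sketch.

#include <hv/hypervisor.h>      /* assumed include path */

#define MAX_SEND_TRIES 1000     /* illustrative bound, not from the header */

/* Send one message to every recipient in the list; recips[] must start
 * with all states set to HV_TO_BE_SENT. */
static int send_to_all(HV_Recipient *recips, int nrecip,
                       HV_VirtAddr buf, int buflen)
{
    int sent = 0, tries;

    for (tries = 0; tries < MAX_SEND_TRIES && sent < nrecip; tries++) {
        int rc = hv_send_message(recips, nrecip, buf, buflen);

        if (rc == HV_ERECIP)
            return rc;          /* at least one permanently bad recipient */
        if (rc < 0)
            return rc;          /* e.g. HV_EFAULT or HV_EINVAL */
        sent += rc;             /* recipients newly moved to HV_SENT */
    }
    return sent;
}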
1327 | |||
1328 | /** Maximum hypervisor message size, in bytes */ | ||
1329 | #define HV_MAX_MESSAGE_SIZE 28 | ||
1330 | |||
1331 | |||
1332 | /** Return value from hv_receive_message() */ | ||
1333 | typedef struct | ||
1334 | { | ||
1335 | int msglen; /**< Message length in bytes, or an error code */ | ||
1336 | __hv32 source; /**< Code identifying message sender (HV_MSG_xxx) */ | ||
1337 | } HV_RcvMsgInfo; | ||
1338 | |||
1339 | #define HV_MSG_TILE 0x0 /**< Message source is another tile */ | ||
1340 | #define HV_MSG_INTR 0x1 /**< Message source is a driver interrupt */ | ||
1341 | |||
1342 | /** Receive a message. | ||
1343 | * | ||
1344 | * This routine retrieves a message from the client's incoming message | ||
1345 | * buffer. | ||
1346 | * | ||
1347 | * Multiple messages sent from a particular sending tile to a particular | ||
1348 | * receiving tile are received in the order that they were sent; however, | ||
1349 | * no ordering is guaranteed between messages sent by different tiles. | ||
1350 | * | ||
1351 | * Whenever a client's message buffer is empty, the first message | ||
1352 | * subsequently received will cause the client's MESSAGE_RCV_DWNCL | ||
1353 | * interrupt vector to be invoked through the interrupt downcall mechanism | ||
1354 | * (see the description of the hv_downcall_dispatch() routine for details | ||
1355 | * on downcalls). | ||
1356 | * | ||
1357 | * Another message-available downcall will not occur until a call to | ||
1358 | * this routine is made when the message buffer is empty, and a message | ||
1359 | * subsequently arrives. Note that such a downcall could occur while | ||
1360 | * this routine is executing. If the calling code does not wish this | ||
1361 | * to happen, it is recommended that this routine be called with the | ||
1362 | * INTCTRL_1 interrupt masked, or inside an interrupt critical section. | ||
1363 | * | ||
1364 | * msgstate is the value previously passed to hv_register_message_state(). | ||
1365 | * buf is the virtual address of the buffer into which the message will | ||
1366 | * be written; buflen is the length of the buffer. | ||
1367 | * | ||
1368 | * This routine returns an HV_RcvMsgInfo structure. The msglen member | ||
1369 | * of that structure is the length of the message received, zero if no | ||
1370 | * message is available, or HV_E2BIG if the message is too large for the | ||
1371 | * specified buffer. If the message is too large, it is not consumed, | ||
1372 | * and may be retrieved by a subsequent call to this routine specifying | ||
1373 | * a sufficiently large buffer. A buffer which is HV_MAX_MESSAGE_SIZE | ||
1374 | * bytes long is guaranteed to be able to receive any possible message. | ||
1375 | * | ||
1376 | * The source member of the HV_RcvMsgInfo structure describes the sender | ||
1377 | * of the message. For messages sent by another client tile via an | ||
1378 | * hv_send_message() call, this value is HV_MSG_TILE; for messages sent | ||
1379 | * as a result of a device interrupt, this value is HV_MSG_INTR. | ||
1380 | */ | ||
1381 | |||
1382 | HV_RcvMsgInfo hv_receive_message(HV_MsgState msgstate, HV_VirtAddr buf, | ||
1383 | int buflen); | ||
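Sketch of draining the incoming message buffer, for instance from the MESSAGE_RCV_DWNCL handler. The declaration above takes the state object by value, so this sketch does the same; msgstate is whatever was registered earlier on this tile, and the pointer cast is an assumption.

#include <hv/hypervisor.h>      /* assumed include path */

static void drain_messages(HV_MsgState msgstate)
{
    char buf[HV_MAX_MESSAGE_SIZE];      /* big enough for any message */

    for (;;) {
        HV_RcvMsgInfo info = hv_receive_message(msgstate,
                                (HV_VirtAddr)(unsigned long)buf,
                                sizeof(buf));

        if (info.msglen == 0)
            break;                      /* buffer empty */
        if (info.msglen < 0)
            break;                      /* e.g. HV_E2BIG (cannot happen here) */

        if (info.source == HV_MSG_TILE) {
            /* buf[0 .. msglen) came from another tile's hv_send_message() */
        } else if (info.source == HV_MSG_INTR) {
            /* buf holds an HV_IntrMsg describing a device interrupt */
        }
    }
}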
1384 | |||
1385 | |||
1386 | /** Start remaining tiles owned by this supervisor. Initially, only one tile | ||
1387 | * executes the client program; after it calls this service, the other tiles | ||
1388 | * are started. This allows the initial tile to do one-time configuration | ||
1389 | * of shared data structures without having to lock them against simultaneous | ||
1390 | * access. | ||
1391 | */ | ||
1392 | void hv_start_all_tiles(void); | ||
1393 | |||
1394 | |||
1395 | /** Open a hypervisor device. | ||
1396 | * | ||
1397 | * This service initializes an I/O device and its hypervisor driver software, | ||
1398 | * and makes it available for use. The open operation is per-device per-chip; | ||
1399 | * once it has been performed, the device handle returned may be used in other | ||
1400 | * device services calls made by any tile. | ||
1401 | * | ||
1402 | * @param name Name of the device. A base device name is just a text string | ||
1403 | * (say, "pcie"). If there is more than one instance of a device, the | ||
1404 | * base name is followed by a slash and a device number (say, "pcie/0"). | ||
1405 | * Some devices may support further structure beneath those components; | ||
1406 | * most notably, devices which require control operations do so by | ||
1407 | * supporting reads and/or writes to a control device whose name | ||
1408 | * includes a trailing "/ctl" (say, "pcie/0/ctl"). | ||
1409 | * @param flags Flags (HV_DEV_xxx). | ||
1410 | * @return A positive integer device handle, or a negative error code. | ||
1411 | */ | ||
1412 | int hv_dev_open(HV_VirtAddr name, __hv32 flags); | ||
1413 | |||
1414 | |||
1415 | /** Close a hypervisor device. | ||
1416 | * | ||
1417 | * This service uninitializes an I/O device and its hypervisor driver | ||
1418 | * software, and makes it unavailable for use. The close operation is | ||
1419 | * per-device per-chip; once it has been performed, the device is no longer | ||
1420 | * available. Normally there is no need to ever call the close service. | ||
1421 | * | ||
1422 | * @param devhdl Device handle of the device to be closed. | ||
1423 | * @return Zero if the close is successful, otherwise, a negative error code. | ||
1424 | */ | ||
1425 | int hv_dev_close(int devhdl); | ||
1426 | |||
1427 | |||
1428 | /** Read data from a hypervisor device synchronously. | ||
1429 | * | ||
1430 | * This service transfers data from a hypervisor device to a memory buffer. | ||
1431 | * When the service returns, the data has been written to the memory buffer, | ||
1432 | * and the buffer will not be further modified by the driver. | ||
1433 | * | ||
1434 | * No ordering is guaranteed between requests issued from different tiles. | ||
1435 | * | ||
1436 | * Devices may choose to support both the synchronous and asynchronous read | ||
1437 | * operations, only one of them, or neither of them. | ||
1438 | * | ||
1439 | * @param devhdl Device handle of the device to be read from. | ||
1440 | * @param flags Flags (HV_DEV_xxx). | ||
1441 | * @param va Virtual address of the target data buffer. This buffer must | ||
1442 | * be mapped in the currently installed page table; if not, HV_EFAULT | ||
1443 | * may be returned. | ||
1444 | * @param len Number of bytes to be transferred. | ||
1445 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1446 | * often a byte offset from the beginning of the device; in other cases, | ||
1447 | * like on a control device, it may have a different meaning. | ||
1448 | * @return A non-negative value if the read was at least partially successful; | ||
1449 | * otherwise, a negative error code. The precise interpretation of | ||
1450 | * the return value is driver-dependent, but many drivers will return | ||
1451 | * the number of bytes successfully transferred. | ||
1452 | */ | ||
1453 | int hv_dev_pread(int devhdl, __hv32 flags, HV_VirtAddr va, __hv32 len, | ||
1454 | __hv64 offset); | ||
1455 | |||
1456 | #define HV_DEV_NB_EMPTY 0x1 /**< Don't block when no bytes of data can | ||
1457 | be transferred. */ | ||
1458 | #define HV_DEV_NB_PARTIAL 0x2 /**< Don't block when some bytes, but not all | ||
1459 | of the requested bytes, can be | ||
1460 | transferred. */ | ||
1461 | #define HV_DEV_NOCACHE 0x4 /**< The caller warrants that none of the | ||
1462 | cache lines which might contain data | ||
1463 | from the requested buffer are valid. | ||
1464 | Useful with asynchronous operations | ||
1465 | only. */ | ||
1466 | |||
1467 | #define HV_DEV_ALLFLAGS (HV_DEV_NB_EMPTY | HV_DEV_NB_PARTIAL | \ | ||
1468 | HV_DEV_NOCACHE) /**< All HV_DEV_xxx flags */ | ||
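Sketch of a blocking synchronous read using the calls above. The device name "srom/0" is purely illustrative; real device names depend on the hypervisor configuration.

#include <hv/hypervisor.h>      /* assumed include path */

/* Open a device, read 'len' bytes from offset 0, and close it again. */
static int read_device_once(char *buf, int len)
{
    int devhdl, rc;

    devhdl = hv_dev_open((HV_VirtAddr)(unsigned long)"srom/0", 0);
    if (devhdl < 0)
        return devhdl;          /* negative error code */

    rc = hv_dev_pread(devhdl, 0 /* block: no HV_DEV_NB_xxx flags */,
                      (HV_VirtAddr)(unsigned long)buf, len, 0 /* offset */);

    hv_dev_close(devhdl);       /* normally never needed, but tidy here */
    return rc;                  /* typically the byte count transferred */
}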
1469 | |||
1470 | /** Write data to a hypervisor device synchronously. | ||
1471 | * | ||
1472 | * This service transfers data from a memory buffer to a hypervisor device. | ||
1473 | * When the service returns, the data has been read from the memory buffer, | ||
1474 | * and the buffer may be overwritten by the client; the data may not | ||
1475 | * necessarily have been conveyed to the actual hardware I/O interface. | ||
1476 | * | ||
1477 | * No ordering is guaranteed between requests issued from different tiles. | ||
1478 | * | ||
1479 | * Devices may choose to support both the synchronous and asynchronous write | ||
1480 | * operations, only one of them, or neither of them. | ||
1481 | * | ||
1482 | * @param devhdl Device handle of the device to be written to. | ||
1483 | * @param flags Flags (HV_DEV_xxx). | ||
1484 | * @param va Virtual address of the source data buffer. This buffer must | ||
1485 | * be mapped in the currently installed page table; if not, HV_EFAULT | ||
1486 | * may be returned. | ||
1487 | * @param len Number of bytes to be transferred. | ||
1488 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1489 | * often a byte offset from the beginning of the device; in other cases, | ||
1490 | * like on a control device, it may have a different meaning. | ||
1491 | * @return A non-negative value if the write was at least partially successful; | ||
1492 | * otherwise, a negative error code. The precise interpretation of | ||
1493 | * the return value is driver-dependent, but many drivers will return | ||
1494 | * the number of bytes successfully transferred. | ||
1495 | */ | ||
1496 | int hv_dev_pwrite(int devhdl, __hv32 flags, HV_VirtAddr va, __hv32 len, | ||
1497 | __hv64 offset); | ||
1498 | |||
1499 | |||
1500 | /** Interrupt arguments, used in the asynchronous I/O interfaces. */ | ||
1501 | #if CHIP_VA_WIDTH() > 32 | ||
1502 | typedef __hv64 HV_IntArg; | ||
1503 | #else | ||
1504 | typedef __hv32 HV_IntArg; | ||
1505 | #endif | ||
1506 | |||
1507 | /** Interrupt messages are delivered via the same mechanism as normal messages, | ||
1508 | * but have a message source of HV_MSG_INTR. The message is formatted | ||
1509 | * as an HV_IntrMsg structure. | ||
1510 | */ | ||
1511 | |||
1512 | typedef struct | ||
1513 | { | ||
1514 | HV_IntArg intarg; /**< Interrupt argument, passed to the poll/preada/pwritea | ||
1515 | services */ | ||
1516 | HV_IntArg intdata; /**< Interrupt-specific interrupt data */ | ||
1517 | } HV_IntrMsg; | ||
1518 | |||
1519 | /** Request an interrupt message when a device condition is satisfied. | ||
1520 | * | ||
1521 | * This service requests that an interrupt message be delivered to the | ||
1522 | * requesting tile when a device becomes readable or writable, or when any | ||
1523 | * data queued to the device via previous write operations from this tile | ||
1524 | * has been actually sent out on the hardware I/O interface. Devices may | ||
1525 | * choose to support any, all, or none of the available conditions. | ||
1526 | * | ||
1527 | * If multiple conditions are specified, only one message will be | ||
1528 | * delivered. If the event mask delivered to that interrupt handler | ||
1529 | * indicates that some of the conditions have not yet occurred, the | ||
1530 | * client must issue another poll() call if it wishes to wait for those | ||
1531 | * conditions. | ||
1532 | * | ||
1533 | * Only one poll may be outstanding per device handle per tile. If more than | ||
1534 | * one tile is polling on the same device and condition, they will all be | ||
1535 | * notified when it happens. Because of this, clients may not assume that | ||
1536 | * the condition signaled is necessarily still true when they request a | ||
1537 | * subsequent service; for instance, the readable data which caused the | ||
1538 | * poll call to interrupt may have been read by another tile in the interim. | ||
1539 | * | ||
1540 | * The notification interrupt message could come directly, or via the | ||
1541 | * downcall (intctrl1) method, depending on what the tile is doing | ||
1542 | * when the condition is satisfied. Note that it is possible for the | ||
1543 | * requested interrupt to be delivered after this service is called but | ||
1544 | * before it returns. | ||
1545 | * | ||
1546 | * @param devhdl Device handle of the device to be polled. | ||
1547 | * @param events Flags denoting the events which will cause the interrupt to | ||
1548 | * be delivered (HV_DEVPOLL_xxx). | ||
1549 | * @param intarg Value which will be delivered as the intarg member of the | ||
1550 | * eventual interrupt message; the intdata member will be set to a | ||
1551 | * mask of HV_DEVPOLL_xxx values indicating which conditions have been | ||
1552 | * satisfied. | ||
1553 | * @return Zero if the interrupt was successfully scheduled; otherwise, a | ||
1554 | * negative error code. | ||
1555 | */ | ||
1556 | int hv_dev_poll(int devhdl, __hv32 events, HV_IntArg intarg); | ||
1557 | |||
1558 | #define HV_DEVPOLL_READ 0x1 /**< Test device for readability */ | ||
1559 | #define HV_DEVPOLL_WRITE 0x2 /**< Test device for writability */ | ||
1560 | #define HV_DEVPOLL_FLUSH 0x4 /**< Test device for output drained */ | ||
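A sketch of asking for a readability notification. POLL_COOKIE is an arbitrary value invented for the sketch; it comes back to the interrupt handler as the intarg member of the HV_IntrMsg.

#include <hv/hypervisor.h>      /* assumed include path */

#define POLL_COOKIE ((HV_IntArg)0x1234) /* illustrative tag, not from the header */

/* Request an interrupt message once devhdl becomes readable. */
static int wait_for_readable(int devhdl)
{
    int rc = hv_dev_poll(devhdl, HV_DEVPOLL_READ, POLL_COOKIE);

    if (rc != 0)
        return rc;              /* the poll could not be scheduled */

    /* Later an interrupt message (source HV_MSG_INTR) arrives as an
     * HV_IntrMsg with intarg == POLL_COOKIE and HV_DEVPOLL_READ set in
     * intdata.  Re-issue hv_dev_poll() after consuming the data if more
     * notifications are wanted; only one poll may be outstanding. */
    return 0;
}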
1561 | |||
1562 | |||
1563 | /** Cancel a request for an interrupt when a device event occurs. | ||
1564 | * | ||
1565 | * This service requests that no interrupt be delivered when the events | ||
1566 | * noted in the last-issued poll() call happen. Once this service returns, | ||
1567 | * the interrupt has been canceled; however, it is possible for the interrupt | ||
1568 | * to be delivered after this service is called but before it returns. | ||
1569 | * | ||
1570 | * @param devhdl Device handle of the device on which to cancel polling. | ||
1571 | * @return Zero if the poll was successfully canceled; otherwise, a negative | ||
1572 | * error code. | ||
1573 | */ | ||
1574 | int hv_dev_poll_cancel(int devhdl); | ||
1575 | |||
1576 | |||
1577 | /** Scatter-gather list for preada/pwritea calls. */ | ||
1578 | typedef struct | ||
1579 | #if CHIP_VA_WIDTH() <= 32 | ||
1580 | __attribute__ ((packed, aligned(4))) | ||
1581 | #endif | ||
1582 | { | ||
1583 | HV_PhysAddr pa; /**< Client physical address of the buffer segment. */ | ||
1584 | HV_PTE pte; /**< Page table entry describing the caching and location | ||
1585 | override characteristics of the buffer segment. Some | ||
1586 | drivers ignore this element and will require that | ||
1587 | the NOCACHE flag be set on their requests. */ | ||
1588 | __hv32 len; /**< Length of the buffer segment. */ | ||
1589 | } HV_SGL; | ||
1590 | |||
1591 | #define HV_SGL_MAXLEN 16 /**< Maximum number of entries in a scatter-gather | ||
1592 | list */ | ||
1593 | |||
1594 | /** Read data from a hypervisor device asynchronously. | ||
1595 | * | ||
1596 | * This service transfers data from a hypervisor device to a memory buffer. | ||
1597 | * When the service returns, the read has been scheduled. When the read | ||
1598 | * completes, an interrupt message will be delivered, and the buffer will | ||
1599 | * not be further modified by the driver. | ||
1600 | * | ||
1601 | * The number of possible outstanding asynchronous requests is defined by | ||
1602 | * each driver, but it is recommended that it be at least two requests | ||
1603 | * per tile per device. | ||
1604 | * | ||
1605 | * No ordering is guaranteed between synchronous and asynchronous requests, | ||
1606 | * even those issued on the same tile. | ||
1607 | * | ||
1608 | * The completion interrupt message could come directly, or via the downcall | ||
1609 | * (intctrl1) method, depending on what the tile is doing when the read | ||
1610 | * completes. Interrupts do not coalesce; one is delivered for each | ||
1611 | * asynchronous I/O request. Note that it is possible for the requested | ||
1612 | * interrupt to be delivered after this service is called but before it | ||
1613 | * returns. | ||
1614 | * | ||
1615 | * Devices may choose to support both the synchronous and asynchronous read | ||
1616 | * operations, only one of them, or neither of them. | ||
1617 | * | ||
1618 | * @param devhdl Device handle of the device to be read from. | ||
1619 | * @param flags Flags (HV_DEV_xxx). | ||
1620 | * @param sgl_len Number of elements in the scatter-gather list. | ||
1621 | * @param sgl Scatter-gather list describing the memory to which data will be | ||
1622 | * written. | ||
1623 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1624 | * often a byte offset from the beginning of the device; in other cases, | ||
1625 | * like on a control device, it may have a different meaning. | ||
1626 | * @param intarg Value which will be delivered as the intarg member of the | ||
1627 | * eventual interrupt message; the intdata member will be set to the | ||
1628 | * normal return value from the read request. | ||
1629 | * @return Zero if the read was successfully scheduled; otherwise, a negative | ||
1630 | * error code. Note that some drivers may choose to pre-validate | ||
1631 | * their arguments, and may thus detect certain device error | ||
1632 | * conditions at this time rather than when the completion notification | ||
1633 | * occurs, but this is not required. | ||
1634 | */ | ||
1635 | int hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
1636 | HV_SGL sgl[/* sgl_len */], __hv64 offset, HV_IntArg intarg); | ||
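/*
 * A minimal, hypothetical usage sketch (not part of the original header):
 * schedule an asynchronous read of one buffer segment.  It assumes the
 * buffer's client physical address and a suitable PTE have already been
 * set up, and that completion is observed via the interrupt message whose
 * intarg equals "cookie".
 */
static int preada_example(int devhdl, HV_PhysAddr buf_pa, HV_PTE buf_pte,
			  __hv32 len, HV_IntArg cookie)
{
	HV_SGL sgl[1];

	sgl[0].pa = buf_pa;
	sgl[0].pte = buf_pte;
	sgl[0].len = len;

	/* Zero flags; offset 0 is typical for a control-style device. */
	return hv_dev_preada(devhdl, 0, 1, sgl, 0, cookie);
}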
1637 | |||
1638 | |||
1639 | /** Write data to a hypervisor device asynchronously. | ||
1640 | * | ||
1641 | * This service transfers data from a memory buffer to a hypervisor | ||
1642 | * device. When the service returns, the write has been scheduled. | ||
1643 | * When the write completes, an interrupt message will be delivered, | ||
1644 | * and the buffer may be overwritten by the client; the data may not | ||
1645 | * necessarily have been conveyed to the actual hardware I/O interface. | ||
1646 | * | ||
1647 | * The number of possible outstanding asynchronous requests is defined by | ||
1648 | * each driver, but it is recommended that it be at least two requests | ||
1649 | * per tile per device. | ||
1650 | * | ||
1651 | * No ordering is guaranteed between synchronous and asynchronous requests, | ||
1652 | * even those issued on the same tile. | ||
1653 | * | ||
1654 | * The completion interrupt message could come directly, or via the downcall | ||
1655 | * (intctrl1) method, depending on what the tile is doing when the write | ||
1656 | * completes. Interrupts do not coalesce; one is delivered for each | ||
1657 | * asynchronous I/O request. Note that it is possible for the requested | ||
1658 | * interrupt to be delivered after this service is called but before it | ||
1659 | * returns. | ||
1660 | * | ||
1661 | * Devices may choose to support both the synchronous and asynchronous write | ||
1662 | * operations, only one of them, or neither of them. | ||
1663 | * | ||
1664 | * @param devhdl Device handle of the device to be written to. | ||
1665 | * @param flags Flags (HV_DEV_xxx). | ||
1666 | * @param sgl_len Number of elements in the scatter-gather list. | ||
1667 | * @param sgl Scatter-gather list describing the memory from which data will be | ||
1668 | * read. | ||
1669 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1670 | * often a byte offset from the beginning of the device; in other cases, | ||
1671 | * like on a control device, it may have a different meaning. | ||
1672 | * @param intarg Value which will be delivered as the intarg member of the | ||
1673 | * eventual interrupt message; the intdata member will be set to the | ||
1674 | * normal return value from the write request. | ||
1675 | * @return Zero if the write was successfully scheduled; otherwise, a negative | ||
1676 | * error code. Note that some drivers may choose to pre-validate | ||
1677 | * their arguments, and may thus detect certain device error | ||
1678 | * conditions at this time rather than when the completion notification | ||
1679 | * occurs, but this is not required. | ||
1680 | */ | ||
1681 | int hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
1682 | HV_SGL sgl[/* sgl_len */], __hv64 offset, HV_IntArg intarg); | ||
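/*
 * A minimal, hypothetical usage sketch (not part of the original header):
 * schedule an asynchronous write from two buffer segments.  The buffers
 * must not be reused until the completion interrupt (intarg == cookie)
 * arrives.
 */
static int pwritea_example(int devhdl, HV_SGL seg0, HV_SGL seg1,
			   __hv64 offset, HV_IntArg cookie)
{
	HV_SGL sgl[2] = { seg0, seg1 };

	return hv_dev_pwritea(devhdl, 0, 2, sgl, offset, cookie);
}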
1683 | |||
1684 | |||
1685 | /** Define a pair of tile and ASID to identify a user process context. */ | ||
1686 | typedef struct | ||
1687 | { | ||
1688 | /** X coordinate, relative to supervisor's top-left coordinate */ | ||
1689 | unsigned int x:11; | ||
1690 | |||
1691 | /** Y coordinate, relative to supervisor's top-left coordinate */ | ||
1692 | unsigned int y:11; | ||
1693 | |||
1694 | /** ASID of the process on this x,y tile */ | ||
1695 | HV_ASID asid:10; | ||
1696 | } HV_Remote_ASID; | ||
1697 | |||
1698 | /** Flush cache and/or TLB state on remote tiles. | ||
1699 | * | ||
1700 | * @param cache_pa Client physical address to flush from cache (ignored if | ||
1701 | * the length encoded in cache_control is zero, or if | ||
1702 | * HV_FLUSH_EVICT_L2 is set, or if cache_cpumask is NULL). | ||
1703 | * @param cache_control This argument allows you to specify a length of | ||
1704 | * physical address space to flush (maximum HV_FLUSH_MAX_CACHE_LEN). | ||
1705 | * You can "or" in HV_FLUSH_EVICT_L2 to flush the whole L2 cache. | ||
1706 | * You can "or" in HV_FLUSH_EVICT_L1I to flush the whole L1I cache. | ||
1707 | * HV_FLUSH_ALL flushes all caches. | ||
1708 | * @param cache_cpumask Bitmask (in row-major order, supervisor-relative) of | ||
1709 | * tile indices to perform cache flush on. The low bit of the first | ||
1710 | * word corresponds to the tile at the upper left-hand corner of the | ||
1711 | * supervisor's rectangle. If passed as a NULL pointer, equivalent | ||
1712 | * to an empty bitmask. On chips which support hash-for-home caching, | ||
1713 | * if passed as -1, equivalent to a mask containing tiles which could | ||
1714 | * be doing hash-for-home caching. | ||
1715 | * @param tlb_va Virtual address to flush from TLB (ignored if | ||
1716 | * tlb_length is zero or tlb_cpumask is NULL). | ||
1717 | * @param tlb_length Number of bytes of data to flush from the TLB. | ||
1718 | * @param tlb_pgsize Page size to use for TLB flushes. | ||
1719 | * tlb_va and tlb_length need not be aligned to this size. | ||
1720 | * @param tlb_cpumask Bitmask for tlb flush, like cache_cpumask. | ||
1721 | * If passed as a NULL pointer, equivalent to an empty bitmask. | ||
1722 | * @param asids Pointer to an HV_Remote_ASID array of tile/ASID pairs to flush. | ||
1723 | * @param asidcount Number of HV_Remote_ASID entries in asids[]. | ||
1724 | * @return Zero for success, or else HV_EINVAL or HV_EFAULT for errors that | ||
1725 | * are detected while parsing the arguments. | ||
1726 | */ | ||
1727 | int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control, | ||
1728 | unsigned long* cache_cpumask, | ||
1729 | HV_VirtAddr tlb_va, unsigned long tlb_length, | ||
1730 | unsigned long tlb_pgsize, unsigned long* tlb_cpumask, | ||
1731 | HV_Remote_ASID* asids, int asidcount); | ||
1732 | |||
1733 | /** Include in cache_control to ensure a flush of the entire L2. */ | ||
1734 | #define HV_FLUSH_EVICT_L2 (1UL << 31) | ||
1735 | |||
1736 | /** Include in cache_control to ensure a flush of the entire L1I. */ | ||
1737 | #define HV_FLUSH_EVICT_L1I (1UL << 30) | ||
1738 | |||
1739 | /** Maximum legal size to use for the "length" component of cache_control. */ | ||
1740 | #define HV_FLUSH_MAX_CACHE_LEN ((1UL << 30) - 1) | ||
1741 | |||
1742 | /** Use for cache_control to ensure a flush of all caches. */ | ||
1743 | #define HV_FLUSH_ALL -1UL | ||
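/*
 * A minimal, hypothetical usage sketch (not part of the original header):
 * evict the whole L2 on every tile in "cache_mask" and flush one small
 * page's worth of TLB entries on every tile in "tlb_mask".  No ASID-based
 * flushing is requested, so the asids array is empty.  cache_pa is passed
 * as zero since it is ignored when HV_FLUSH_EVICT_L2 is set.
 */
static int flush_example(unsigned long *cache_mask, unsigned long *tlb_mask,
			 HV_VirtAddr va, unsigned long page_size)
{
	return hv_flush_remote(0, HV_FLUSH_EVICT_L2, cache_mask,
			       va, page_size, page_size, tlb_mask,
			       NULL, 0);
}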
1744 | |||
1745 | #else /* __ASSEMBLER__ */ | ||
1746 | |||
1747 | /** Include in cache_control to ensure a flush of the entire L2. */ | ||
1748 | #define HV_FLUSH_EVICT_L2 (1 << 31) | ||
1749 | |||
1750 | /** Include in cache_control to ensure a flush of the entire L1I. */ | ||
1751 | #define HV_FLUSH_EVICT_L1I (1 << 30) | ||
1752 | |||
1753 | /** Maximum legal size to use for the "length" component of cache_control. */ | ||
1754 | #define HV_FLUSH_MAX_CACHE_LEN ((1 << 30) - 1) | ||
1755 | |||
1756 | /** Use for cache_control to ensure a flush of all caches. */ | ||
1757 | #define HV_FLUSH_ALL -1 | ||
1758 | |||
1759 | #endif /* __ASSEMBLER__ */ | ||
1760 | |||
1761 | #ifndef __ASSEMBLER__ | ||
1762 | |||
1763 | /** Return a 64-bit value corresponding to the PTE if needed */ | ||
1764 | #define hv_pte_val(pte) ((pte).val) | ||
1765 | |||
1766 | /** Cast a 64-bit value to an HV_PTE */ | ||
1767 | #define hv_pte(val) ((HV_PTE) { val }) | ||
1768 | |||
1769 | #endif /* !__ASSEMBLER__ */ | ||
1770 | |||
1771 | |||
1772 | /** Bits in the size of an HV_PTE */ | ||
1773 | #define HV_LOG2_PTE_SIZE 3 | ||
1774 | |||
1775 | /** Size of an HV_PTE */ | ||
1776 | #define HV_PTE_SIZE (1 << HV_LOG2_PTE_SIZE) | ||
1777 | |||
1778 | |||
1779 | /* Bits in HV_PTE's low word. */ | ||
1780 | #define HV_PTE_INDEX_PRESENT 0 /**< PTE is valid */ | ||
1781 | #define HV_PTE_INDEX_MIGRATING 1 /**< Page is migrating */ | ||
1782 | #define HV_PTE_INDEX_CLIENT0 2 /**< Page client state 0 */ | ||
1783 | #define HV_PTE_INDEX_CLIENT1 3 /**< Page client state 1 */ | ||
1784 | #define HV_PTE_INDEX_NC 4 /**< L1$/L2$ incoherent with L3$ */ | ||
1785 | #define HV_PTE_INDEX_NO_ALLOC_L1 5 /**< Page is uncached in local L1$ */ | ||
1786 | #define HV_PTE_INDEX_NO_ALLOC_L2 6 /**< Page is uncached in local L2$ */ | ||
1787 | #define HV_PTE_INDEX_CACHED_PRIORITY 7 /**< Page is priority cached */ | ||
1788 | #define HV_PTE_INDEX_PAGE 8 /**< PTE describes a page */ | ||
1789 | #define HV_PTE_INDEX_GLOBAL 9 /**< Page is global */ | ||
1790 | #define HV_PTE_INDEX_USER 10 /**< Page is user-accessible */ | ||
1791 | #define HV_PTE_INDEX_ACCESSED 11 /**< Page has been accessed */ | ||
1792 | #define HV_PTE_INDEX_DIRTY 12 /**< Page has been written */ | ||
1793 | /* Bits 13-15 are reserved for | ||
1794 | future use. */ | ||
1795 | #define HV_PTE_INDEX_MODE 16 /**< Page mode; see HV_PTE_MODE_xxx */ | ||
1796 | #define HV_PTE_MODE_BITS 3 /**< Number of bits in mode */ | ||
1797 | /* Bit 19 is reserved for | ||
1798 | future use. */ | ||
1799 | #define HV_PTE_INDEX_LOTAR 20 /**< Page's LOTAR; must be high bits | ||
1800 | of word */ | ||
1801 | #define HV_PTE_LOTAR_BITS 12 /**< Number of bits in a LOTAR */ | ||
1802 | |||
1803 | /* Bits in HV_PTE's high word. */ | ||
1804 | #define HV_PTE_INDEX_READABLE 32 /**< Page is readable */ | ||
1805 | #define HV_PTE_INDEX_WRITABLE 33 /**< Page is writable */ | ||
1806 | #define HV_PTE_INDEX_EXECUTABLE 34 /**< Page is executable */ | ||
1807 | #define HV_PTE_INDEX_PTFN 35 /**< Page's PTFN; must be high bits | ||
1808 | of word */ | ||
1809 | #define HV_PTE_PTFN_BITS 29 /**< Number of bits in a PTFN */ | ||
1810 | |||
1811 | /** Position of the PFN field within the PTE (subset of the PTFN). */ | ||
1812 | #define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \ | ||
1813 | HV_LOG2_PAGE_TABLE_ALIGN)) | ||
1814 | |||
1815 | /** Length of the PFN field within the PTE (subset of the PTFN). */ | ||
1816 | #define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \ | ||
1817 | (HV_LOG2_PAGE_SIZE_SMALL - \ | ||
1818 | HV_LOG2_PAGE_TABLE_ALIGN)) | ||
1819 | |||
1820 | /* | ||
1821 | * Legal values for the PTE's mode field | ||
1822 | */ | ||
1823 | /** Data is not resident in any caches; loads and stores access memory | ||
1824 | * directly. | ||
1825 | */ | ||
1826 | #define HV_PTE_MODE_UNCACHED 1 | ||
1827 | |||
1828 | /** Data is resident in the tile's local L1 and/or L2 caches; if a load | ||
1829 | * or store misses there, it goes to memory. | ||
1830 | * | ||
1831 | * The copy in the local L1$/L2$ is not invalidated when the copy in | ||
1832 | * memory is changed. | ||
1833 | */ | ||
1834 | #define HV_PTE_MODE_CACHE_NO_L3 2 | ||
1835 | |||
1836 | /** Data is resident in the tile's local L1 and/or L2 caches. If a load | ||
1837 | * or store misses there, it goes to an L3 cache in a designated tile; | ||
1838 | * if it misses there, it goes to memory. | ||
1839 | * | ||
1840 | * If the NC bit is not set, the copy in the local L1$/L2$ is invalidated | ||
1841 | * when the copy in the remote L3$ is changed. Otherwise, such | ||
1842 | * invalidation will not occur. | ||
1843 | * | ||
1844 | * Chips for which CHIP_HAS_COHERENT_LOCAL_CACHE() is 0 do not support | ||
1845 | * invalidation from an L3$ to another tile's L1$/L2$. If the NC bit is | ||
1846 | * clear on such a chip, no copy is kept in the local L1$/L2$ in this mode. | ||
1847 | */ | ||
1848 | #define HV_PTE_MODE_CACHE_TILE_L3 3 | ||
1849 | |||
1850 | /** Data is resident in the tile's local L1 and/or L2 caches. If a load | ||
1851 | * or store misses there, it goes to an L3 cache in one of a set of | ||
1852 | * designated tiles; if it misses there, it goes to memory. Which tile | ||
1853 | * is chosen from the set depends upon a hash function applied to the | ||
1854 | * physical address. This mode is not supported on chips for which | ||
1855 | * CHIP_HAS_CBOX_HOME_MAP() is 0. | ||
1856 | * | ||
1857 | * If the NC bit is not set, the copy in the local L1$/L2$ is invalidated | ||
1858 | * when the copy in the remote L3$ is changed. Otherwise, such | ||
1859 | * invalidation will not occur. | ||
1860 | * | ||
1861 | * Chips for which CHIP_HAS_COHERENT_LOCAL_CACHE() is 0 do not support | ||
1862 | * invalidation from an L3$ to another tile's L1$/L2$. If the NC bit is | ||
1863 | * clear on such a chip, no copy is kept in the local L1$/L2$ in this mode. | ||
1864 | */ | ||
1865 | #define HV_PTE_MODE_CACHE_HASH_L3 4 | ||
1866 | |||
1867 | /** Data is not resident in memory; accesses are instead made to an I/O | ||
1868 | * device, whose tile coordinates are given by the PTE's LOTAR field. | ||
1869 | * This mode is only supported on chips for which CHIP_HAS_MMIO() is 1. | ||
1870 | * The EXECUTABLE bit may not be set in an MMIO PTE. | ||
1871 | */ | ||
1872 | #define HV_PTE_MODE_MMIO 5 | ||
1873 | |||
1874 | |||
1875 | /* C wants 1ULL so it is typed as __hv64, but the assembler needs just numbers. | ||
1876 | * The assembler can't handle shifts greater than 31, but treats them | ||
1877 | * as shifts mod 32, so assembler code must be aware of which word | ||
1878 | * the bit belongs in when using these macros. | ||
1879 | */ | ||
1880 | #ifdef __ASSEMBLER__ | ||
1881 | #define __HV_PTE_ONE 1 /**< One, for assembler */ | ||
1882 | #else | ||
1883 | #define __HV_PTE_ONE 1ULL /**< One, for C */ | ||
1884 | #endif | ||
1885 | |||
1886 | /** Is this PTE present? | ||
1887 | * | ||
1888 | * If this bit is set, this PTE represents a valid translation or level-2 | ||
1889 | * page table pointer. Otherwise, the page table does not contain a | ||
1890 | * translation for the subject virtual pages. | ||
1891 | * | ||
1892 | * If this bit is not set, the other bits in the PTE are not | ||
1893 | * interpreted by the hypervisor, and may contain any value. | ||
1894 | */ | ||
1895 | #define HV_PTE_PRESENT (__HV_PTE_ONE << HV_PTE_INDEX_PRESENT) | ||
1896 | |||
1897 | /** Does this PTE map a page? | ||
1898 | * | ||
1899 | * If this bit is set in the level-1 page table, the entry should be | ||
1900 | * interpreted as a level-2 page table entry mapping a large page. | ||
1901 | * | ||
1902 | * This bit should not be modified by the client while PRESENT is set, as | ||
1903 | * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits. | ||
1904 | * | ||
1905 | * In a level-2 page table, this bit is ignored and must be zero. | ||
1906 | */ | ||
1907 | #define HV_PTE_PAGE (__HV_PTE_ONE << HV_PTE_INDEX_PAGE) | ||
1908 | |||
1909 | /** Is this a global (non-ASID) mapping? | ||
1910 | * | ||
1911 | * If this bit is set, the translations established by this PTE will | ||
1912 | * not be flushed from the TLB by the hv_flush_asid() service; they | ||
1913 | * will be flushed by the hv_flush_page() or hv_flush_pages() services. | ||
1914 | * | ||
1915 | * Setting this bit for translations which are identical in all page | ||
1916 | * tables (for instance, code and data belonging to a client OS) can | ||
1917 | * be very beneficial, as it will reduce the number of TLB misses. | ||
1918 | * Note that, while it is not an error which will be detected by the | ||
1919 | * hypervisor, it is an extremely bad idea to set this bit for | ||
1920 | * translations which are _not_ identical in all page tables. | ||
1921 | * | ||
1922 | * This bit should not be modified by the client while PRESENT is set, as | ||
1923 | * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits. | ||
1924 | * | ||
1925 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1926 | */ | ||
1927 | #define HV_PTE_GLOBAL (__HV_PTE_ONE << HV_PTE_INDEX_GLOBAL) | ||
1928 | |||
1929 | /** Is this mapping accessible to users? | ||
1930 | * | ||
1931 | * If this bit is set, code running at any PL will be permitted to | ||
1932 | * access the virtual addresses mapped by this PTE. Otherwise, only | ||
1933 | * code running at PL 1 or above will be allowed to do so. | ||
1934 | * | ||
1935 | * This bit should not be modified by the client while PRESENT is set, as | ||
1936 | * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits. | ||
1937 | * | ||
1938 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1939 | */ | ||
1940 | #define HV_PTE_USER (__HV_PTE_ONE << HV_PTE_INDEX_USER) | ||
1941 | |||
1942 | /** Has this mapping been accessed? | ||
1943 | * | ||
1944 | * This bit is set by the hypervisor when the memory described by the | ||
1945 | * translation is accessed for the first time. It is never cleared by | ||
1946 | * the hypervisor, but may be cleared by the client. After the bit | ||
1947 | * has been cleared, subsequent references are not guaranteed to set | ||
1948 | * it again until the translation has been flushed from the TLB. | ||
1949 | * | ||
1950 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1951 | */ | ||
1952 | #define HV_PTE_ACCESSED (__HV_PTE_ONE << HV_PTE_INDEX_ACCESSED) | ||
1953 | |||
1954 | /** Is this mapping dirty? | ||
1955 | * | ||
1956 | * This bit is set by the hypervisor when the memory described by the | ||
1957 | * translation is written for the first time. It is never cleared by | ||
1958 | * the hypervisor, but may be cleared by the client. After the bit | ||
1959 | * has been cleared, subsequent references are not guaranteed to set | ||
1960 | * it again until the translation has been flushed from the TLB. | ||
1961 | * | ||
1962 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1963 | */ | ||
1964 | #define HV_PTE_DIRTY (__HV_PTE_ONE << HV_PTE_INDEX_DIRTY) | ||
1965 | |||
1966 | /** Migrating bit in PTE. | ||
1967 | * | ||
1968 | * This bit is guaranteed not to be inspected or modified by the | ||
1969 | * hypervisor. The name is indicative of the suggested use by the client | ||
1970 | * to tag pages whose L3 cache is being migrated from one cpu to another. | ||
1971 | */ | ||
1972 | #define HV_PTE_MIGRATING (__HV_PTE_ONE << HV_PTE_INDEX_MIGRATING) | ||
1973 | |||
1974 | /** Client-private bit in PTE. | ||
1975 | * | ||
1976 | * This bit is guaranteed not to be inspected or modified by the | ||
1977 | * hypervisor. | ||
1978 | */ | ||
1979 | #define HV_PTE_CLIENT0 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT0) | ||
1980 | |||
1981 | /** Client-private bit in PTE. | ||
1982 | * | ||
1983 | * This bit is guaranteed not to be inspected or modified by the | ||
1984 | * hypervisor. | ||
1985 | */ | ||
1986 | #define HV_PTE_CLIENT1 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1) | ||
1987 | |||
1988 | /** Non-coherent (NC) bit in PTE. | ||
1989 | * | ||
1990 | * If this bit is set, the mapping that is set up will be non-coherent | ||
1991 | * (also known as non-inclusive). This means that changes to the L3 | ||
1992 | * cache will not cause a local copy to be invalidated. It is generally | ||
1993 | * recommended only for read-only mappings. | ||
1994 | * | ||
1995 | * In level-1 PTEs, if the Page bit is clear, this bit determines how the | ||
1996 | * level-2 page table is accessed. | ||
1997 | */ | ||
1998 | #define HV_PTE_NC (__HV_PTE_ONE << HV_PTE_INDEX_NC) | ||
1999 | |||
2000 | /** Is this page prevented from filling the L1$? | ||
2001 | * | ||
2002 | * If this bit is set, the page described by the PTE will not be cached | ||
2003 | * in the local cpu's L1 cache. | ||
2004 | * | ||
2005 | * If CHIP_HAS_NC_AND_NOALLOC_BITS() is not true in <chip.h> for this chip, | ||
2006 | * it is illegal to use this attribute, and may cause client termination. | ||
2007 | * | ||
2008 | * In level-1 PTEs, if the Page bit is clear, this bit | ||
2009 | * determines how the level-2 page table is accessed. | ||
2010 | */ | ||
2011 | #define HV_PTE_NO_ALLOC_L1 (__HV_PTE_ONE << HV_PTE_INDEX_NO_ALLOC_L1) | ||
2012 | |||
2013 | /** Is this page prevented from filling the L2$? | ||
2014 | * | ||
2015 | * If this bit is set, the page described by the PTE will not be cached | ||
2016 | * in the local cpu's L2 cache. | ||
2017 | * | ||
2018 | * If CHIP_HAS_NC_AND_NOALLOC_BITS() is not true in <chip.h> for this chip, | ||
2019 | * it is illegal to use this attribute, and may cause client termination. | ||
2020 | * | ||
2021 | * In level-1 PTEs, if the Page bit is clear, this bit determines how the | ||
2022 | * level-2 page table is accessed. | ||
2023 | */ | ||
2024 | #define HV_PTE_NO_ALLOC_L2 (__HV_PTE_ONE << HV_PTE_INDEX_NO_ALLOC_L2) | ||
2025 | |||
2026 | /** Is this a priority page? | ||
2027 | * | ||
2028 | * If this bit is set, the page described by the PTE will be given | ||
2029 | * priority in the cache. Normally this translates into allowing the | ||
2030 | * page to use only the "red" half of the cache. The client may wish to | ||
2031 | * then use the hv_set_caching service to specify that other pages which | ||
2032 | * alias this page will use only the "black" half of the cache. | ||
2033 | * | ||
2034 | * If the Cached Priority bit is clear, the hypervisor uses the | ||
2035 | * current hv_set_caching() value to choose how to cache the page. | ||
2036 | * | ||
2037 | * It is illegal to set the Cached Priority bit if the Non-Cached bit | ||
2038 | * is set and the Cached Remotely bit is clear, i.e. if requests to | ||
2039 | * the page map directly to memory. | ||
2040 | * | ||
2041 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2042 | */ | ||
2043 | #define HV_PTE_CACHED_PRIORITY (__HV_PTE_ONE << \ | ||
2044 | HV_PTE_INDEX_CACHED_PRIORITY) | ||
2045 | |||
2046 | /** Is this a readable mapping? | ||
2047 | * | ||
2048 | * If this bit is set, code will be permitted to read from (e.g., | ||
2049 | * issue load instructions against) the virtual addresses mapped by | ||
2050 | * this PTE. | ||
2051 | * | ||
2052 | * It is illegal for this bit to be clear if the Writable bit is set. | ||
2053 | * | ||
2054 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2055 | */ | ||
2056 | #define HV_PTE_READABLE (__HV_PTE_ONE << HV_PTE_INDEX_READABLE) | ||
2057 | |||
2058 | /** Is this a writable mapping? | ||
2059 | * | ||
2060 | * If this bit is set, code will be permitted to write to (e.g., issue | ||
2061 | * store instructions against) the virtual addresses mapped by this | ||
2062 | * PTE. | ||
2063 | * | ||
2064 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2065 | */ | ||
2066 | #define HV_PTE_WRITABLE (__HV_PTE_ONE << HV_PTE_INDEX_WRITABLE) | ||
2067 | |||
2068 | /** Is this an executable mapping? | ||
2069 | * | ||
2070 | * If this bit is set, code will be permitted to execute from | ||
2071 | * (e.g., jump to) the virtual addresses mapped by this PTE. | ||
2072 | * | ||
2073 | * This bit applies to any processor on the tile, if there are more | ||
2074 | * than one. | ||
2075 | * | ||
2076 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2077 | */ | ||
2078 | #define HV_PTE_EXECUTABLE (__HV_PTE_ONE << HV_PTE_INDEX_EXECUTABLE) | ||
2079 | |||
2080 | /** The width of a LOTAR's x or y bitfield. */ | ||
2081 | #define HV_LOTAR_WIDTH 11 | ||
2082 | |||
2083 | /** Converts an x,y pair to a LOTAR value. */ | ||
2084 | #define HV_XY_TO_LOTAR(x, y) ((HV_LOTAR)(((x) << HV_LOTAR_WIDTH) | (y))) | ||
2085 | |||
2086 | /** Extracts the X component of a lotar. */ | ||
2087 | #define HV_LOTAR_X(lotar) ((lotar) >> HV_LOTAR_WIDTH) | ||
2088 | |||
2089 | /** Extracts the Y component of a lotar. */ | ||
2090 | #define HV_LOTAR_Y(lotar) ((lotar) & ((1 << HV_LOTAR_WIDTH) - 1)) | ||
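/*
 * A minimal, hypothetical sketch (not part of the original header): pack
 * and unpack a LOTAR.  For x = 3, y = 4 the round trip recovers both
 * coordinates.
 */
static void lotar_example(void)
{
	HV_LOTAR lotar = HV_XY_TO_LOTAR(3, 4);
	unsigned int x = HV_LOTAR_X(lotar);    /* 3 */
	unsigned int y = HV_LOTAR_Y(lotar);    /* 4 */
	(void) x;
	(void) y;
}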
2091 | |||
2092 | #ifndef __ASSEMBLER__ | ||
2093 | |||
2094 | /** Define accessor functions for a PTE bit. */ | ||
2095 | #define _HV_BIT(name, bit) \ | ||
2096 | static __inline int \ | ||
2097 | hv_pte_get_##name(HV_PTE pte) \ | ||
2098 | { \ | ||
2099 | return (pte.val >> HV_PTE_INDEX_##bit) & 1; \ | ||
2100 | } \ | ||
2101 | \ | ||
2102 | static __inline HV_PTE \ | ||
2103 | hv_pte_set_##name(HV_PTE pte) \ | ||
2104 | { \ | ||
2105 | pte.val |= 1ULL << HV_PTE_INDEX_##bit; \ | ||
2106 | return pte; \ | ||
2107 | } \ | ||
2108 | \ | ||
2109 | static __inline HV_PTE \ | ||
2110 | hv_pte_clear_##name(HV_PTE pte) \ | ||
2111 | { \ | ||
2112 | pte.val &= ~(1ULL << HV_PTE_INDEX_##bit); \ | ||
2113 | return pte; \ | ||
2114 | } | ||
2115 | |||
2116 | /* Generate accessors to get, set, and clear various PTE flags. | ||
2117 | */ | ||
2118 | _HV_BIT(present, PRESENT) | ||
2119 | _HV_BIT(page, PAGE) | ||
2120 | _HV_BIT(client0, CLIENT0) | ||
2121 | _HV_BIT(client1, CLIENT1) | ||
2122 | _HV_BIT(migrating, MIGRATING) | ||
2123 | _HV_BIT(nc, NC) | ||
2124 | _HV_BIT(readable, READABLE) | ||
2125 | _HV_BIT(writable, WRITABLE) | ||
2126 | _HV_BIT(executable, EXECUTABLE) | ||
2127 | _HV_BIT(accessed, ACCESSED) | ||
2128 | _HV_BIT(dirty, DIRTY) | ||
2129 | _HV_BIT(no_alloc_l1, NO_ALLOC_L1) | ||
2130 | _HV_BIT(no_alloc_l2, NO_ALLOC_L2) | ||
2131 | _HV_BIT(cached_priority, CACHED_PRIORITY) | ||
2132 | _HV_BIT(global, GLOBAL) | ||
2133 | _HV_BIT(user, USER) | ||
2134 | |||
2135 | #undef _HV_BIT | ||
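/*
 * A minimal, hypothetical sketch (not part of the original header): the
 * generated accessors are pure functions that return an updated copy of
 * the PTE, so they can be chained without touching the page table until
 * the final value is stored.
 */
static HV_PTE make_ro_user_pte(HV_PTE pte)
{
	pte = hv_pte_set_present(pte);
	pte = hv_pte_set_user(pte);
	pte = hv_pte_set_readable(pte);
	pte = hv_pte_clear_writable(pte);
	return pte;
}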
2136 | |||
2137 | /** Get the page mode from the PTE. | ||
2138 | * | ||
2139 | * This field generally determines whether and how accesses to the page | ||
2140 | * are cached; the HV_PTE_MODE_xxx symbols define the legal values for the | ||
2141 | * page mode. The NC, NO_ALLOC_L1, and NO_ALLOC_L2 bits modify this | ||
2142 | * general policy. | ||
2143 | */ | ||
2144 | static __inline unsigned int | ||
2145 | hv_pte_get_mode(const HV_PTE pte) | ||
2146 | { | ||
2147 | return (((__hv32) pte.val) >> HV_PTE_INDEX_MODE) & | ||
2148 | ((1 << HV_PTE_MODE_BITS) - 1); | ||
2149 | } | ||
2150 | |||
2151 | /** Set the page mode into a PTE. See hv_pte_get_mode. */ | ||
2152 | static __inline HV_PTE | ||
2153 | hv_pte_set_mode(HV_PTE pte, unsigned int val) | ||
2154 | { | ||
2155 | pte.val &= ~(((1ULL << HV_PTE_MODE_BITS) - 1) << HV_PTE_INDEX_MODE); | ||
2156 | pte.val |= val << HV_PTE_INDEX_MODE; | ||
2157 | return pte; | ||
2158 | } | ||
2159 | |||
2160 | /** Get the page frame number from the PTE. | ||
2161 | * | ||
2162 | * This field contains the upper bits of the CPA (client physical | ||
2163 | * address) of the target page; the complete CPA is this field with | ||
2164 | * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it. | ||
2165 | * | ||
2166 | * For PTEs in a level-1 page table where the Page bit is set, the | ||
2167 | * CPA must be aligned modulo the large page size. | ||
2168 | */ | ||
2169 | static __inline unsigned int | ||
2170 | hv_pte_get_pfn(const HV_PTE pte) | ||
2171 | { | ||
2172 | return pte.val >> HV_PTE_INDEX_PFN; | ||
2173 | } | ||
2174 | |||
2175 | |||
2176 | /** Set the page frame number into a PTE. See hv_pte_get_pfn. */ | ||
2177 | static __inline HV_PTE | ||
2178 | hv_pte_set_pfn(HV_PTE pte, unsigned int val) | ||
2179 | { | ||
2180 | /* | ||
2181 | * Note that the use of "PTFN" in the next line is intentional; we | ||
2182 | * don't want any garbage lower bits left in that field. | ||
2183 | */ | ||
2184 | pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN); | ||
2185 | pte.val |= (__hv64) val << HV_PTE_INDEX_PFN; | ||
2186 | return pte; | ||
2187 | } | ||
2188 | |||
2189 | /** Get the page table frame number from the PTE. | ||
2190 | * | ||
2191 | * This field contains the upper bits of the CPA (client physical | ||
2192 | * address) of the target page table; the complete CPA is this field with | ||
2193 | * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it. | ||
2194 | * | ||
2195 | * For PTEs in a level-1 page table when the Page bit is not set, the | ||
2196 | * CPA must be aligned modulo the stricter of HV_PAGE_TABLE_ALIGN and | ||
2197 | * the level-2 page table size. | ||
2198 | */ | ||
2199 | static __inline unsigned long | ||
2200 | hv_pte_get_ptfn(const HV_PTE pte) | ||
2201 | { | ||
2202 | return pte.val >> HV_PTE_INDEX_PTFN; | ||
2203 | } | ||
2204 | |||
2205 | |||
2206 | /** Set the page table frame number into a PTE. See hv_pte_get_ptfn. */ | ||
2207 | static __inline HV_PTE | ||
2208 | hv_pte_set_ptfn(HV_PTE pte, unsigned long val) | ||
2209 | { | ||
2210 | pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS)-1) << HV_PTE_INDEX_PTFN); | ||
2211 | pte.val |= (__hv64) val << HV_PTE_INDEX_PTFN; | ||
2212 | return pte; | ||
2213 | } | ||
2214 | |||
2215 | |||
2216 | /** Get the remote tile caching this page. | ||
2217 | * | ||
2218 | * Specifies the remote tile which is providing the L3 cache for this page. | ||
2219 | * | ||
2220 | * This field is ignored unless the page mode is HV_PTE_MODE_CACHE_TILE_L3. | ||
2221 | * | ||
2222 | * In level-1 PTEs, if the Page bit is clear, this field determines how the | ||
2223 | * level-2 page table is accessed. | ||
2224 | */ | ||
2225 | static __inline unsigned int | ||
2226 | hv_pte_get_lotar(const HV_PTE pte) | ||
2227 | { | ||
2228 | unsigned int lotar = ((__hv32) pte.val) >> HV_PTE_INDEX_LOTAR; | ||
2229 | |||
2230 | return HV_XY_TO_LOTAR( (lotar >> (HV_PTE_LOTAR_BITS / 2)), | ||
2231 | (lotar & ((1 << (HV_PTE_LOTAR_BITS / 2)) - 1)) ); | ||
2232 | } | ||
2233 | |||
2234 | |||
2235 | /** Set the remote tile caching a page into a PTE. See hv_pte_get_lotar. */ | ||
2236 | static __inline HV_PTE | ||
2237 | hv_pte_set_lotar(HV_PTE pte, unsigned int val) | ||
2238 | { | ||
2239 | unsigned int x = HV_LOTAR_X(val); | ||
2240 | unsigned int y = HV_LOTAR_Y(val); | ||
2241 | |||
2242 | pte.val &= ~(((1ULL << HV_PTE_LOTAR_BITS)-1) << HV_PTE_INDEX_LOTAR); | ||
2243 | pte.val |= (x << (HV_PTE_INDEX_LOTAR + HV_PTE_LOTAR_BITS / 2)) | | ||
2244 | (y << HV_PTE_INDEX_LOTAR); | ||
2245 | return pte; | ||
2246 | } | ||
2247 | |||
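/*
 * A minimal, hypothetical sketch (not part of the original header): build
 * a small-page PTE from scratch, homing its L3 cache on the tile named by
 * "lotar".  The PFN is assumed to come from a valid client physical
 * address via HV_CPA_TO_PFN().
 */
static HV_PTE build_example_pte(unsigned int pfn, unsigned int lotar)
{
	HV_PTE pte = hv_pte(0);

	pte = hv_pte_set_pfn(pte, pfn);
	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
	pte = hv_pte_set_lotar(pte, lotar);
	pte = hv_pte_set_readable(pte);
	pte = hv_pte_set_writable(pte);
	pte = hv_pte_set_present(pte);
	return pte;
}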
2248 | #endif /* !__ASSEMBLER__ */ | ||
2249 | |||
2250 | /** Converts a client physical address to a pfn. */ | ||
2251 | #define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL) | ||
2252 | |||
2253 | /** Converts a pfn to a client physical address. */ | ||
2254 | #define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL) | ||
2255 | |||
2256 | /** Converts a client physical address to a ptfn. */ | ||
2257 | #define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN) | ||
2258 | |||
2259 | /** Converts a ptfn to a client physical address. */ | ||
2260 | #define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN) | ||
2261 | |||
2262 | /** Converts a ptfn to a pfn. */ | ||
2263 | #define HV_PTFN_TO_PFN(p) \ | ||
2264 | ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN)) | ||
2265 | |||
2266 | /** Converts a pfn to a ptfn. */ | ||
2267 | #define HV_PFN_TO_PTFN(p) \ | ||
2268 | ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN)) | ||
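/*
 * A minimal, hypothetical sketch (not part of the original header): the
 * conversion macros compose, so a client physical address can be turned
 * into either frame-number form and back without knowing the page-size
 * constants directly.
 */
static void frame_number_example(HV_PhysAddr cpa)
{
	unsigned long pfn = HV_CPA_TO_PFN(cpa);
	unsigned long ptfn = HV_PFN_TO_PTFN(pfn);

	/* Both recover the page-aligned part of the original address. */
	(void) HV_PFN_TO_CPA(pfn);
	(void) HV_PTFN_TO_CPA(ptfn);
}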
2269 | |||
2270 | #if CHIP_VA_WIDTH() > 32 | ||
2271 | |||
2272 | /** Log number of HV_PTE entries in L0 page table */ | ||
2273 | #define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN) | ||
2274 | |||
2275 | /** Number of HV_PTE entries in L0 page table */ | ||
2276 | #define HV_L0_ENTRIES (1 << HV_LOG2_L0_ENTRIES) | ||
2277 | |||
2278 | /** Log size of L0 page table in bytes */ | ||
2279 | #define HV_LOG2_L0_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L0_ENTRIES) | ||
2280 | |||
2281 | /** Size of L0 page table in bytes */ | ||
2282 | #define HV_L0_SIZE (1 << HV_LOG2_L0_SIZE) | ||
2283 | |||
2284 | #ifdef __ASSEMBLER__ | ||
2285 | |||
2286 | /** Index in L0 for a specific VA */ | ||
2287 | #define HV_L0_INDEX(va) \ | ||
2288 | (((va) >> HV_LOG2_L1_SPAN) & (HV_L0_ENTRIES - 1)) | ||
2289 | |||
2290 | #else | ||
2291 | |||
2292 | /** Index in L0 for a specific VA */ | ||
2293 | #define HV_L0_INDEX(va) \ | ||
2294 | (((HV_VirtAddr)(va) >> HV_LOG2_L1_SPAN) & (HV_L0_ENTRIES - 1)) | ||
2295 | |||
2296 | #endif | ||
2297 | |||
2298 | #endif /* CHIP_VA_WIDTH() > 32 */ | ||
2299 | |||
2300 | /** Log number of HV_PTE entries in L1 page table */ | ||
2301 | #define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE) | ||
2302 | |||
2303 | /** Number of HV_PTE entries in L1 page table */ | ||
2304 | #define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES) | ||
2305 | |||
2306 | /** Log size of L1 page table in bytes */ | ||
2307 | #define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES) | ||
2308 | |||
2309 | /** Size of L1 page table in bytes */ | ||
2310 | #define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE) | ||
2311 | |||
2312 | /** Log number of HV_PTE entries in level-2 page table */ | ||
2313 | #define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL) | ||
2314 | |||
2315 | /** Number of HV_PTE entries in level-2 page table */ | ||
2316 | #define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES) | ||
2317 | |||
2318 | /** Log size of level-2 page table in bytes */ | ||
2319 | #define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES) | ||
2320 | |||
2321 | /** Size of level-2 page table in bytes */ | ||
2322 | #define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE) | ||
2323 | |||
2324 | #ifdef __ASSEMBLER__ | ||
2325 | |||
2326 | #if CHIP_VA_WIDTH() > 32 | ||
2327 | |||
2328 | /** Index in L1 for a specific VA */ | ||
2329 | #define HV_L1_INDEX(va) \ | ||
2330 | (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1)) | ||
2331 | |||
2332 | #else /* CHIP_VA_WIDTH() > 32 */ | ||
2333 | |||
2334 | /** Index in L1 for a specific VA */ | ||
2335 | #define HV_L1_INDEX(va) \ | ||
2336 | (((va) >> HV_LOG2_PAGE_SIZE_LARGE)) | ||
2337 | |||
2338 | #endif /* CHIP_VA_WIDTH() > 32 */ | ||
2339 | |||
2340 | /** Index in level-2 page table for a specific VA */ | ||
2341 | #define HV_L2_INDEX(va) \ | ||
2342 | (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1)) | ||
2343 | |||
2344 | #else /* __ASSEMBLER__ */ | ||
2345 | |||
2346 | #if CHIP_VA_WIDTH() > 32 | ||
2347 | |||
2348 | /** Index in L1 for a specific VA */ | ||
2349 | #define HV_L1_INDEX(va) \ | ||
2350 | (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1)) | ||
2351 | |||
2352 | #else /* CHIP_VA_WIDTH() > 32 */ | ||
2353 | |||
2354 | /** Index in L1 for a specific VA */ | ||
2355 | #define HV_L1_INDEX(va) \ | ||
2356 | (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE)) | ||
2357 | |||
2358 | #endif /* CHIP_VA_WIDTH() > 32 */ | ||
2359 | |||
2360 | /** Index in level-2 page table for a specific VA */ | ||
2361 | #define HV_L2_INDEX(va) \ | ||
2362 | (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1)) | ||
2363 | |||
2364 | #endif /* __ASSEMBLER__ */ | ||
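/*
 * A minimal, hypothetical sketch (not part of the original header): locate
 * the PTE for a virtual address given a two-level page table, using the
 * index macros above.  It assumes CHIP_VA_WIDTH() <= 32 (so there is no L0
 * level) and that "l2_table" was derived from the L1 entry's PTFN.
 */
static HV_PTE lookup_example(const HV_PTE *l1_table, const HV_PTE *l2_table,
			     HV_VirtAddr va)
{
	HV_PTE l1 = l1_table[HV_L1_INDEX(va)];

	if (hv_pte_get_page(l1))        /* large page mapped directly */
		return l1;
	return l2_table[HV_L2_INDEX(va)];
}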
2365 | |||
2366 | #endif /* _TILE_HV_H */ | ||
diff --git a/arch/tile/include/hv/syscall_public.h b/arch/tile/include/hv/syscall_public.h new file mode 100644 index 000000000000..9cc0837e69fd --- /dev/null +++ b/arch/tile/include/hv/syscall_public.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file syscall.h | ||
17 | * Indices for the hypervisor system calls that are intended to be called | ||
18 | * directly, rather than only through hypervisor-generated "glue" code. | ||
19 | */ | ||
20 | |||
21 | #ifndef _SYS_HV_INCLUDE_SYSCALL_PUBLIC_H | ||
22 | #define _SYS_HV_INCLUDE_SYSCALL_PUBLIC_H | ||
23 | |||
24 | /** Fast syscall flag bit location. When this bit is set, the hypervisor | ||
25 | * handles the syscall specially. | ||
26 | */ | ||
27 | #define HV_SYS_FAST_SHIFT 14 | ||
28 | |||
29 | /** Fast syscall flag bit mask. */ | ||
30 | #define HV_SYS_FAST_MASK (1 << HV_SYS_FAST_SHIFT) | ||
31 | |||
32 | /** Bit location for flagging fast syscalls that can be called from PL0. */ | ||
33 | #define HV_SYS_FAST_PL0_SHIFT 13 | ||
34 | |||
35 | /** Fast syscall allowing PL0 bit mask. */ | ||
36 | #define HV_SYS_FAST_PL0_MASK (1 << HV_SYS_FAST_PL0_SHIFT) | ||
37 | |||
38 | /** Perform an MF that waits for all victims to reach DRAM. */ | ||
39 | #define HV_SYS_fence_incoherent (51 | HV_SYS_FAST_MASK \ | ||
40 | | HV_SYS_FAST_PL0_MASK) | ||
41 | |||
42 | #endif /* !_SYS_HV_INCLUDE_SYSCALL_PUBLIC_H */ | ||
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile new file mode 100644 index 000000000000..756e6ec452d3 --- /dev/null +++ b/arch/tile/kernel/Makefile | |||
@@ -0,0 +1,16 @@ | |||
1 | # | ||
2 | # Makefile for the Linux/TILE kernel. | ||
3 | # | ||
4 | |||
5 | extra-y := vmlinux.lds head_$(BITS).o | ||
6 | obj-y := backtrace.o entry.o init_task.o irq.o messaging.o \ | ||
7 | pci-dma.o proc.o process.o ptrace.o reboot.o \ | ||
8 | setup.o signal.o single_step.o stack.o sys.o time.o traps.o \ | ||
9 | intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o | ||
10 | |||
11 | obj-$(CONFIG_TILEGX) += futex_64.o | ||
12 | obj-$(CONFIG_COMPAT) += compat.o compat_signal.o | ||
13 | obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o | ||
14 | obj-$(CONFIG_MODULES) += module.o | ||
15 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | ||
16 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o | ||
diff --git a/arch/tile/kernel/asm-offsets.c b/arch/tile/kernel/asm-offsets.c new file mode 100644 index 000000000000..01ddf19cc36d --- /dev/null +++ b/arch/tile/kernel/asm-offsets.c | |||
@@ -0,0 +1,76 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Generates definitions from c-type structures used by assembly sources. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kbuild.h> | ||
18 | #include <linux/thread_info.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/hardirq.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | |||
24 | /* Check for compatible compiler early in the build. */ | ||
25 | #ifdef CONFIG_TILEGX | ||
26 | # ifndef __tilegx__ | ||
27 | # error Can only build TILE-Gx configurations with tilegx compiler | ||
28 | # endif | ||
29 | # ifndef __LP64__ | ||
30 | # error Must not specify -m32 when building the TILE-Gx kernel | ||
31 | # endif | ||
32 | #else | ||
33 | # ifdef __tilegx__ | ||
34 | # error Can not build TILEPro/TILE64 configurations with tilegx compiler | ||
35 | # endif | ||
36 | #endif | ||
37 | |||
38 | void foo(void) | ||
39 | { | ||
40 | DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET, \ | ||
41 | offsetof(struct single_step_state, buffer)); | ||
42 | DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET, \ | ||
43 | offsetof(struct single_step_state, flags)); | ||
44 | DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET, \ | ||
45 | offsetof(struct single_step_state, orig_pc)); | ||
46 | DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET, \ | ||
47 | offsetof(struct single_step_state, next_pc)); | ||
48 | DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET, \ | ||
49 | offsetof(struct single_step_state, branch_next_pc)); | ||
50 | DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET, \ | ||
51 | offsetof(struct single_step_state, update_value)); | ||
52 | |||
53 | DEFINE(THREAD_INFO_TASK_OFFSET, \ | ||
54 | offsetof(struct thread_info, task)); | ||
55 | DEFINE(THREAD_INFO_FLAGS_OFFSET, \ | ||
56 | offsetof(struct thread_info, flags)); | ||
57 | DEFINE(THREAD_INFO_STATUS_OFFSET, \ | ||
58 | offsetof(struct thread_info, status)); | ||
59 | DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET, \ | ||
60 | offsetof(struct thread_info, homecache_cpu)); | ||
61 | DEFINE(THREAD_INFO_STEP_STATE_OFFSET, \ | ||
62 | offsetof(struct thread_info, step_state)); | ||
63 | |||
64 | DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET, | ||
65 | offsetof(struct task_struct, thread.ksp)); | ||
66 | DEFINE(TASK_STRUCT_THREAD_PC_OFFSET, | ||
67 | offsetof(struct task_struct, thread.pc)); | ||
68 | |||
69 | DEFINE(HV_TOPOLOGY_WIDTH_OFFSET, \ | ||
70 | offsetof(HV_Topology, width)); | ||
71 | DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET, \ | ||
72 | offsetof(HV_Topology, height)); | ||
73 | |||
74 | DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET, \ | ||
75 | offsetof(irq_cpustat_t, irq_syscall_count)); | ||
76 | } | ||
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c new file mode 100644 index 000000000000..1b0a410ef5e7 --- /dev/null +++ b/arch/tile/kernel/backtrace.c | |||
@@ -0,0 +1,634 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/string.h> | ||
17 | |||
18 | #include <asm/backtrace.h> | ||
19 | |||
20 | #include <arch/chip.h> | ||
21 | |||
22 | #if TILE_CHIP < 10 | ||
23 | |||
24 | |||
25 | #include <asm/opcode-tile.h> | ||
26 | |||
27 | |||
28 | #define TREG_SP 54 | ||
29 | #define TREG_LR 55 | ||
30 | |||
31 | |||
32 | /** A decoded bundle used for backtracer analysis. */ | ||
33 | typedef struct { | ||
34 | tile_bundle_bits bits; | ||
35 | int num_insns; | ||
36 | struct tile_decoded_instruction | ||
37 | insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]; | ||
38 | } BacktraceBundle; | ||
39 | |||
40 | |||
41 | /* This implementation only makes sense for native tools. */ | ||
42 | /** Default function to read memory. */ | ||
43 | static bool | ||
44 | bt_read_memory(void *result, VirtualAddress addr, size_t size, void *extra) | ||
45 | { | ||
46 | /* FIXME: this should do some horrible signal stuff to catch | ||
47 | * SEGV cleanly and fail. | ||
48 | * | ||
49 | * Or else the caller should do the setjmp for efficiency. | ||
50 | */ | ||
51 | |||
52 | memcpy(result, (const void *)addr, size); | ||
53 | return true; | ||
54 | } | ||
55 | |||
56 | |||
57 | /** Locates an instruction inside the given bundle that | ||
58 | * has the specified mnemonic, and whose first 'num_operands_to_match' | ||
59 | * operands exactly match those in 'operand_values'. | ||
60 | */ | ||
61 | static const struct tile_decoded_instruction* | ||
62 | find_matching_insn(const BacktraceBundle *bundle, | ||
63 | tile_mnemonic mnemonic, | ||
64 | const int *operand_values, | ||
65 | int num_operands_to_match) | ||
66 | { | ||
67 | int i, j; | ||
68 | bool match; | ||
69 | |||
70 | for (i = 0; i < bundle->num_insns; i++) { | ||
71 | const struct tile_decoded_instruction *insn = | ||
72 | &bundle->insns[i]; | ||
73 | |||
74 | if (insn->opcode->mnemonic != mnemonic) | ||
75 | continue; | ||
76 | |||
77 | match = true; | ||
78 | for (j = 0; j < num_operands_to_match; j++) { | ||
79 | if (operand_values[j] != insn->operand_values[j]) { | ||
80 | match = false; | ||
81 | break; | ||
82 | } | ||
83 | } | ||
84 | |||
85 | if (match) | ||
86 | return insn; | ||
87 | } | ||
88 | |||
89 | return NULL; | ||
90 | } | ||
91 | |||
92 | /** Does this bundle contain an 'iret' instruction? */ | ||
93 | static inline bool | ||
94 | bt_has_iret(const BacktraceBundle *bundle) | ||
95 | { | ||
96 | return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL; | ||
97 | } | ||
98 | |||
99 | /** Does this bundle contain an 'addi sp, sp, OFFSET' or | ||
100 | * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET? | ||
101 | */ | ||
102 | static bool | ||
103 | bt_has_addi_sp(const BacktraceBundle *bundle, int *adjust) | ||
104 | { | ||
105 | static const int vals[2] = { TREG_SP, TREG_SP }; | ||
106 | |||
107 | const struct tile_decoded_instruction *insn = | ||
108 | find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2); | ||
109 | if (insn == NULL) | ||
110 | insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2); | ||
111 | if (insn == NULL) | ||
112 | return false; | ||
113 | |||
114 | *adjust = insn->operand_values[2]; | ||
115 | return true; | ||
116 | } | ||
117 | |||
118 | /** Does this bundle contain any 'info OP' or 'infol OP' | ||
119 | * instruction, and if so, what are their OP? Note that OP is interpreted | ||
120 | * as an unsigned value by this code since that's what the caller wants. | ||
121 | * Returns the number of info ops found. | ||
122 | */ | ||
123 | static int | ||
124 | bt_get_info_ops(const BacktraceBundle *bundle, | ||
125 | int operands[MAX_INFO_OPS_PER_BUNDLE]) | ||
126 | { | ||
127 | int num_ops = 0; | ||
128 | int i; | ||
129 | |||
130 | for (i = 0; i < bundle->num_insns; i++) { | ||
131 | const struct tile_decoded_instruction *insn = | ||
132 | &bundle->insns[i]; | ||
133 | |||
134 | if (insn->opcode->mnemonic == TILE_OPC_INFO || | ||
135 | insn->opcode->mnemonic == TILE_OPC_INFOL) { | ||
136 | operands[num_ops++] = insn->operand_values[0]; | ||
137 | } | ||
138 | } | ||
139 | |||
140 | return num_ops; | ||
141 | } | ||
142 | |||
143 | /** Does this bundle contain a jrp instruction, and if so, to which | ||
144 | * register is it jumping? | ||
145 | */ | ||
146 | static bool | ||
147 | bt_has_jrp(const BacktraceBundle *bundle, int *target_reg) | ||
148 | { | ||
149 | const struct tile_decoded_instruction *insn = | ||
150 | find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0); | ||
151 | if (insn == NULL) | ||
152 | return false; | ||
153 | |||
154 | *target_reg = insn->operand_values[0]; | ||
155 | return true; | ||
156 | } | ||
157 | |||
158 | /** Does this bundle modify the specified register in any way? */ | ||
159 | static bool | ||
160 | bt_modifies_reg(const BacktraceBundle *bundle, int reg) | ||
161 | { | ||
162 | int i, j; | ||
163 | for (i = 0; i < bundle->num_insns; i++) { | ||
164 | const struct tile_decoded_instruction *insn = | ||
165 | &bundle->insns[i]; | ||
166 | |||
167 | if (insn->opcode->implicitly_written_register == reg) | ||
168 | return true; | ||
169 | |||
170 | for (j = 0; j < insn->opcode->num_operands; j++) | ||
171 | if (insn->operands[j]->is_dest_reg && | ||
172 | insn->operand_values[j] == reg) | ||
173 | return true; | ||
174 | } | ||
175 | |||
176 | return false; | ||
177 | } | ||
178 | |||
179 | /** Does this bundle modify sp? */ | ||
180 | static inline bool | ||
181 | bt_modifies_sp(const BacktraceBundle *bundle) | ||
182 | { | ||
183 | return bt_modifies_reg(bundle, TREG_SP); | ||
184 | } | ||
185 | |||
186 | /** Does this bundle modify lr? */ | ||
187 | static inline bool | ||
188 | bt_modifies_lr(const BacktraceBundle *bundle) | ||
189 | { | ||
190 | return bt_modifies_reg(bundle, TREG_LR); | ||
191 | } | ||
192 | |||
193 | /** Does this bundle contain the instruction 'move fp, sp'? */ | ||
194 | static inline bool | ||
195 | bt_has_move_r52_sp(const BacktraceBundle *bundle) | ||
196 | { | ||
197 | static const int vals[2] = { 52, TREG_SP }; | ||
198 | return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL; | ||
199 | } | ||
200 | |||
201 | /** Does this bundle contain the instruction 'sw sp, lr'? */ | ||
202 | static inline bool | ||
203 | bt_has_sw_sp_lr(const BacktraceBundle *bundle) | ||
204 | { | ||
205 | static const int vals[2] = { TREG_SP, TREG_LR }; | ||
206 | return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL; | ||
207 | } | ||
208 | |||
209 | /** Locates the caller's PC and SP for a program starting at the | ||
210 | * given address. | ||
211 | */ | ||
212 | static void | ||
213 | find_caller_pc_and_caller_sp(CallerLocation *location, | ||
214 | const VirtualAddress start_pc, | ||
215 | BacktraceMemoryReader read_memory_func, | ||
216 | void *read_memory_func_extra) | ||
217 | { | ||
218 | /* Have we explicitly decided what the sp is, | ||
219 | * rather than just the default? | ||
220 | */ | ||
221 | bool sp_determined = false; | ||
222 | |||
223 | /* Has any bundle seen so far modified lr? */ | ||
224 | bool lr_modified = false; | ||
225 | |||
226 | /* Have we seen a move from sp to fp? */ | ||
227 | bool sp_moved_to_r52 = false; | ||
228 | |||
229 | /* Have we seen a terminating bundle? */ | ||
230 | bool seen_terminating_bundle = false; | ||
231 | |||
232 | /* Cut down on round-trip reading overhead by reading several | ||
233 | * bundles at a time. | ||
234 | */ | ||
235 | tile_bundle_bits prefetched_bundles[32]; | ||
236 | int num_bundles_prefetched = 0; | ||
237 | int next_bundle = 0; | ||
238 | VirtualAddress pc; | ||
239 | |||
240 | /* Default to assuming that the caller's sp is the current sp. | ||
241 | * This is necessary to handle the case where we start backtracing | ||
242 | * right at the end of the epilog. | ||
243 | */ | ||
244 | location->sp_location = SP_LOC_OFFSET; | ||
245 | location->sp_offset = 0; | ||
246 | |||
247 | /* Default to having no idea where the caller PC is. */ | ||
248 | location->pc_location = PC_LOC_UNKNOWN; | ||
249 | |||
250 | /* Don't even try if the PC is not aligned. */ | ||
251 | if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) | ||
252 | return; | ||
253 | |||
254 | for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) { | ||
255 | |||
256 | BacktraceBundle bundle; | ||
257 | int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE]; | ||
258 | int one_ago, jrp_reg; | ||
259 | bool has_jrp; | ||
260 | |||
261 | if (next_bundle >= num_bundles_prefetched) { | ||
262 | /* Prefetch some bytes, but don't cross a page | ||
263 | * boundary since that might cause a read failure we | ||
264 | * don't care about if we only need the first few | ||
265 | * bytes. Note: we don't care what the actual page | ||
266 | * size is; using the minimum possible page size will | ||
267 | * prevent any problems. | ||
268 | */ | ||
269 | unsigned int bytes_to_prefetch = 4096 - (pc & 4095); | ||
270 | if (bytes_to_prefetch > sizeof prefetched_bundles) | ||
271 | bytes_to_prefetch = sizeof prefetched_bundles; | ||
272 | |||
273 | if (!read_memory_func(prefetched_bundles, pc, | ||
274 | bytes_to_prefetch, | ||
275 | read_memory_func_extra)) { | ||
276 | if (pc == start_pc) { | ||
277 | /* The program probably called a bad | ||
278 | * address, such as a NULL pointer. | ||
279 | * So treat this as if we are at the | ||
280 | * start of the function prolog so the | ||
281 | * backtrace will show how we got here. | ||
282 | */ | ||
283 | location->pc_location = PC_LOC_IN_LR; | ||
284 | return; | ||
285 | } | ||
286 | |||
287 | /* Unreadable address. Give up. */ | ||
288 | break; | ||
289 | } | ||
290 | |||
291 | next_bundle = 0; | ||
292 | num_bundles_prefetched = | ||
293 | bytes_to_prefetch / sizeof(tile_bundle_bits); | ||
294 | } | ||
295 | |||
296 | /* Decode the next bundle. */ | ||
297 | bundle.bits = prefetched_bundles[next_bundle++]; | ||
298 | bundle.num_insns = | ||
299 | parse_insn_tile(bundle.bits, pc, bundle.insns); | ||
300 | num_info_ops = bt_get_info_ops(&bundle, info_operands); | ||
301 | |||
302 | /* First look at any one_ago info ops if they are interesting, | ||
303 | * since they should shadow any non-one-ago info ops. | ||
304 | */ | ||
305 | for (one_ago = (pc != start_pc) ? 1 : 0; | ||
306 | one_ago >= 0; one_ago--) { | ||
307 | int i; | ||
308 | for (i = 0; i < num_info_ops; i++) { | ||
309 | int info_operand = info_operands[i]; | ||
310 | if (info_operand < CALLER_UNKNOWN_BASE) { | ||
311 | /* Weird; reserved value, ignore it. */ | ||
312 | continue; | ||
313 | } | ||
314 | |||
315 | /* Skip info ops which are not in the | ||
316 | * "one_ago" mode we want right now. | ||
317 | */ | ||
318 | if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0) | ||
319 | != (one_ago != 0)) | ||
320 | continue; | ||
321 | |||
322 | /* Clear the flag to make later checking | ||
323 | * easier. */ | ||
324 | info_operand &= ~ONE_BUNDLE_AGO_FLAG; | ||
325 | |||
326 | /* Default to looking at PC_IN_LR_FLAG. */ | ||
327 | if (info_operand & PC_IN_LR_FLAG) | ||
328 | location->pc_location = | ||
329 | PC_LOC_IN_LR; | ||
330 | else | ||
331 | location->pc_location = | ||
332 | PC_LOC_ON_STACK; | ||
333 | |||
334 | switch (info_operand) { | ||
335 | case CALLER_UNKNOWN_BASE: | ||
336 | location->pc_location = PC_LOC_UNKNOWN; | ||
337 | location->sp_location = SP_LOC_UNKNOWN; | ||
338 | return; | ||
339 | |||
340 | case CALLER_SP_IN_R52_BASE: | ||
341 | case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG: | ||
342 | location->sp_location = SP_LOC_IN_R52; | ||
343 | return; | ||
344 | |||
345 | default: | ||
346 | { | ||
347 | const unsigned int val = info_operand | ||
348 | - CALLER_SP_OFFSET_BASE; | ||
349 | const unsigned int sp_offset = | ||
350 | (val >> NUM_INFO_OP_FLAGS) * 8; | ||
351 | if (sp_offset < 32768) { | ||
352 | /* This is a properly encoded | ||
353 | * SP offset. */ | ||
354 | location->sp_location = | ||
355 | SP_LOC_OFFSET; | ||
356 | location->sp_offset = | ||
357 | sp_offset; | ||
358 | return; | ||
359 | } else { | ||
360 | /* This looked like an SP | ||
361 | * offset, but it's outside | ||
362 | * the legal range, so this | ||
363 | * must be an unrecognized | ||
364 | * info operand. Ignore it. | ||
365 | */ | ||
366 | } | ||
367 | } | ||
368 | break; | ||
369 | } | ||
370 | } | ||
371 | } | ||
372 | |||
373 | if (seen_terminating_bundle) { | ||
374 | /* We saw a terminating bundle during the previous | ||
375 | * iteration, so we were only looking for an info op. | ||
376 | */ | ||
377 | break; | ||
378 | } | ||
379 | |||
380 | if (bundle.bits == 0) { | ||
381 | /* Wacky terminating bundle. Stop looping, and hope | ||
382 | * we've already seen enough to find the caller. | ||
383 | */ | ||
384 | break; | ||
385 | } | ||
386 | |||
387 | /* | ||
388 | * Try to determine caller's SP. | ||
389 | */ | ||
390 | |||
391 | if (!sp_determined) { | ||
392 | int adjust; | ||
393 | if (bt_has_addi_sp(&bundle, &adjust)) { | ||
394 | location->sp_location = SP_LOC_OFFSET; | ||
395 | |||
396 | if (adjust <= 0) { | ||
397 | /* We are in prolog about to adjust | ||
398 | * SP. */ | ||
399 | location->sp_offset = 0; | ||
400 | } else { | ||
401 | /* We are in epilog restoring SP. */ | ||
402 | location->sp_offset = adjust; | ||
403 | } | ||
404 | |||
405 | sp_determined = true; | ||
406 | } else { | ||
407 | if (bt_has_move_r52_sp(&bundle)) { | ||
408 | /* Maybe in prolog, creating an | ||
409 | * alloca-style frame. But maybe in | ||
410 | * the middle of a fixed-size frame | ||
411 | * clobbering r52 with SP. | ||
412 | */ | ||
413 | sp_moved_to_r52 = true; | ||
414 | } | ||
415 | |||
416 | if (bt_modifies_sp(&bundle)) { | ||
417 | if (sp_moved_to_r52) { | ||
418 | /* We saw SP get saved into | ||
419 | * r52 earlier (or now), which | ||
420 | * must have been in the | ||
421 | * prolog, so we now know that | ||
422 | * SP is still holding the | ||
423 | * caller's sp value. | ||
424 | */ | ||
425 | location->sp_location = | ||
426 | SP_LOC_OFFSET; | ||
427 | location->sp_offset = 0; | ||
428 | } else { | ||
429 | /* Someone must have saved | ||
430 | * aside the caller's SP value | ||
431 | * into r52, so r52 holds the | ||
432 | * current value. | ||
433 | */ | ||
434 | location->sp_location = | ||
435 | SP_LOC_IN_R52; | ||
436 | } | ||
437 | sp_determined = true; | ||
438 | } | ||
439 | } | ||
440 | } | ||
441 | |||
442 | if (bt_has_iret(&bundle)) { | ||
443 | /* This is a terminating bundle. */ | ||
444 | seen_terminating_bundle = true; | ||
445 | continue; | ||
446 | } | ||
447 | |||
448 | /* | ||
449 | * Try to determine caller's PC. | ||
450 | */ | ||
451 | |||
452 | jrp_reg = -1; | ||
453 | has_jrp = bt_has_jrp(&bundle, &jrp_reg); | ||
454 | if (has_jrp) | ||
455 | seen_terminating_bundle = true; | ||
456 | |||
457 | if (location->pc_location == PC_LOC_UNKNOWN) { | ||
458 | if (has_jrp) { | ||
459 | if (jrp_reg == TREG_LR && !lr_modified) { | ||
460 | /* Looks like a leaf function, or else | ||
461 | * lr is already restored. */ | ||
462 | location->pc_location = | ||
463 | PC_LOC_IN_LR; | ||
464 | } else { | ||
465 | location->pc_location = | ||
466 | PC_LOC_ON_STACK; | ||
467 | } | ||
468 | } else if (bt_has_sw_sp_lr(&bundle)) { | ||
469 | /* In prolog, spilling initial lr to stack. */ | ||
470 | location->pc_location = PC_LOC_IN_LR; | ||
471 | } else if (bt_modifies_lr(&bundle)) { | ||
472 | lr_modified = true; | ||
473 | } | ||
474 | } | ||
475 | } | ||
476 | } | ||
477 | |||
478 | void | ||
479 | backtrace_init(BacktraceIterator *state, | ||
480 | BacktraceMemoryReader read_memory_func, | ||
481 | void *read_memory_func_extra, | ||
482 | VirtualAddress pc, VirtualAddress lr, | ||
483 | VirtualAddress sp, VirtualAddress r52) | ||
484 | { | ||
485 | CallerLocation location; | ||
486 | VirtualAddress fp, initial_frame_caller_pc; | ||
487 | |||
488 | if (read_memory_func == NULL) { | ||
489 | read_memory_func = bt_read_memory; | ||
490 | } | ||
491 | |||
492 | /* Find out where we are in the initial frame. */ | ||
493 | find_caller_pc_and_caller_sp(&location, pc, | ||
494 | read_memory_func, read_memory_func_extra); | ||
495 | |||
496 | switch (location.sp_location) { | ||
497 | case SP_LOC_UNKNOWN: | ||
498 | /* Give up. */ | ||
499 | fp = -1; | ||
500 | break; | ||
501 | |||
502 | case SP_LOC_IN_R52: | ||
503 | fp = r52; | ||
504 | break; | ||
505 | |||
506 | case SP_LOC_OFFSET: | ||
507 | fp = sp + location.sp_offset; | ||
508 | break; | ||
509 | |||
510 | default: | ||
511 | /* Give up. */ | ||
512 | fp = -1; | ||
513 | break; | ||
514 | } | ||
515 | |||
516 | /* The frame pointer should theoretically be aligned mod 8. If | ||
517 | * it's not even aligned mod 4 then something terrible happened | ||
518 | * and we should mark it as invalid. | ||
519 | */ | ||
520 | if (fp % 4 != 0) | ||
521 | fp = -1; | ||
522 | |||
523 | /* -1 means "don't know initial_frame_caller_pc". */ | ||
524 | initial_frame_caller_pc = -1; | ||
525 | |||
526 | switch (location.pc_location) { | ||
527 | case PC_LOC_UNKNOWN: | ||
528 | /* Give up. */ | ||
529 | fp = -1; | ||
530 | break; | ||
531 | |||
532 | case PC_LOC_IN_LR: | ||
533 | if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) { | ||
534 | /* Give up. */ | ||
535 | fp = -1; | ||
536 | } else { | ||
537 | initial_frame_caller_pc = lr; | ||
538 | } | ||
539 | break; | ||
540 | |||
541 | case PC_LOC_ON_STACK: | ||
542 | /* Leave initial_frame_caller_pc as -1, | ||
543 | * meaning check the stack. | ||
544 | */ | ||
545 | break; | ||
546 | |||
547 | default: | ||
548 | /* Give up. */ | ||
549 | fp = -1; | ||
550 | break; | ||
551 | } | ||
552 | |||
553 | state->pc = pc; | ||
554 | state->sp = sp; | ||
555 | state->fp = fp; | ||
556 | state->initial_frame_caller_pc = initial_frame_caller_pc; | ||
557 | state->read_memory_func = read_memory_func; | ||
558 | state->read_memory_func_extra = read_memory_func_extra; | ||
559 | } | ||
560 | |||
561 | bool | ||
562 | backtrace_next(BacktraceIterator *state) | ||
563 | { | ||
564 | VirtualAddress next_fp, next_pc, next_frame[2]; | ||
565 | |||
566 | if (state->fp == -1) { | ||
567 | /* No parent frame. */ | ||
568 | return false; | ||
569 | } | ||
570 | |||
571 | /* Try to read the frame linkage data chaining to the next function. */ | ||
572 | if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame, | ||
573 | state->read_memory_func_extra)) { | ||
574 | return false; | ||
575 | } | ||
576 | |||
577 | next_fp = next_frame[1]; | ||
578 | if (next_fp % 4 != 0) { | ||
579 | /* Caller's frame pointer is suspect, so give up. | ||
580 | * Technically it should be aligned mod 8, but we will | ||
581 | * be forgiving here. | ||
582 | */ | ||
583 | return false; | ||
584 | } | ||
585 | |||
586 | if (state->initial_frame_caller_pc != -1) { | ||
587 | /* We must be in the initial stack frame and already know the | ||
588 | * caller PC. | ||
589 | */ | ||
590 | next_pc = state->initial_frame_caller_pc; | ||
591 | |||
592 | /* Force reading the stack next time, in case we were in the | ||
593 | * initial frame. We don't do this above only because we want | ||
594 | * to avoid touching the struct at all when we return false. | ||
595 | */ | ||
596 | state->initial_frame_caller_pc = -1; | ||
597 | } else { | ||
598 | /* Get the caller PC from the frame linkage area. */ | ||
599 | next_pc = next_frame[0]; | ||
600 | if (next_pc == 0 || | ||
601 | next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) { | ||
602 | /* The PC is suspect, so give up. */ | ||
603 | return false; | ||
604 | } | ||
605 | } | ||
606 | |||
607 | /* Update state to become the caller's stack frame. */ | ||
608 | state->pc = next_pc; | ||
609 | state->sp = state->fp; | ||
610 | state->fp = next_fp; | ||
611 | |||
612 | return true; | ||
613 | } | ||
614 | |||
615 | #else /* TILE_CHIP < 10 */ | ||
616 | |||
617 | void | ||
618 | backtrace_init(BacktraceIterator *state, | ||
619 | BacktraceMemoryReader read_memory_func, | ||
620 | void *read_memory_func_extra, | ||
621 | VirtualAddress pc, VirtualAddress lr, | ||
622 | VirtualAddress sp, VirtualAddress r52) | ||
623 | { | ||
624 | state->pc = pc; | ||
625 | state->sp = sp; | ||
626 | state->fp = -1; | ||
627 | state->initial_frame_caller_pc = -1; | ||
628 | state->read_memory_func = read_memory_func; | ||
629 | state->read_memory_func_extra = read_memory_func_extra; | ||
630 | } | ||
631 | |||
632 | bool backtrace_next(BacktraceIterator *state) { return false; } | ||
633 | |||
634 | #endif /* TILE_CHIP < 10 */ | ||
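The API above is meant to be driven by one backtrace_init() call followed by backtrace_next() until it returns false. The sketch below shows how kernel code might walk a few frames, assuming the BacktraceIterator/VirtualAddress types from this patch's <asm/backtrace.h>; the reader callback's exact parameter types, the printing, and the whole demo are illustrative assumptions, and a real callback would validate the address range rather than blindly copying.

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <asm/backtrace.h>

    /* Illustrative reader: copies kernel memory directly, no validation. */
    static bool demo_read_memory(void *result, VirtualAddress addr,
                                 size_t size, void *extra)
    {
            memcpy(result, (void *)addr, size);
            return true;
    }

    static void demo_walk_stack(VirtualAddress pc, VirtualAddress lr,
                                VirtualAddress sp, VirtualAddress r52)
    {
            BacktraceIterator it;
            int i;

            backtrace_init(&it, demo_read_memory, NULL, pc, lr, sp, r52);
            for (i = 0; i < 16; i++) {
                    pr_info("frame %d: pc %#lx sp %#lx\n", i,
                            (unsigned long)it.pc, (unsigned long)it.sp);
                    if (!backtrace_next(&it))
                            break;
            }
    }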
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c new file mode 100644 index 000000000000..a374c99deeb6 --- /dev/null +++ b/arch/tile/kernel/compat.c | |||
@@ -0,0 +1,183 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* Adjust unistd.h to provide 32-bit numbers and functions. */ | ||
16 | #define __SYSCALL_COMPAT | ||
17 | |||
18 | #include <linux/compat.h> | ||
19 | #include <linux/msg.h> | ||
20 | #include <linux/syscalls.h> | ||
21 | #include <linux/kdev_t.h> | ||
22 | #include <linux/fs.h> | ||
23 | #include <linux/fcntl.h> | ||
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | #include <linux/signal.h> | ||
27 | #include <asm/syscalls.h> | ||
28 | |||
29 | /* | ||
30 | * Syscalls that take 64-bit numbers traditionally take them in 32-bit | ||
31 | * "high" and "low" value parts on 32-bit architectures. | ||
32 | * In principle, one could imagine passing some register arguments as | ||
33 | * fully 64-bit on TILE-Gx in 32-bit mode, but it seems easier to | ||
34 | * adapt the usual convention. | ||
35 | */ | ||
36 | |||
37 | long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high) | ||
38 | { | ||
39 | return sys_truncate(filename, ((loff_t)high << 32) | low); | ||
40 | } | ||
41 | |||
42 | long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high) | ||
43 | { | ||
44 | return sys_ftruncate(fd, ((loff_t)high << 32) | low); | ||
45 | } | ||
46 | |||
47 | long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count, | ||
48 | u32 dummy, u32 low, u32 high) | ||
49 | { | ||
50 | return sys_pread64(fd, ubuf, count, ((loff_t)high << 32) | low); | ||
51 | } | ||
52 | |||
53 | long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count, | ||
54 | u32 dummy, u32 low, u32 high) | ||
55 | { | ||
56 | return sys_pwrite64(fd, ubuf, count, ((loff_t)high << 32) | low); | ||
57 | } | ||
58 | |||
59 | long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len) | ||
60 | { | ||
61 | return sys_lookup_dcookie(((loff_t)high << 32) | low, buf, len); | ||
62 | } | ||
63 | |||
64 | long compat_sys_sync_file_range2(int fd, unsigned int flags, | ||
65 | u32 offset_lo, u32 offset_hi, | ||
66 | u32 nbytes_lo, u32 nbytes_hi) | ||
67 | { | ||
68 | return sys_sync_file_range(fd, ((loff_t)offset_hi << 32) | offset_lo, | ||
69 | ((loff_t)nbytes_hi << 32) | nbytes_lo, | ||
70 | flags); | ||
71 | } | ||
72 | |||
73 | long compat_sys_fallocate(int fd, int mode, | ||
74 | u32 offset_lo, u32 offset_hi, | ||
75 | u32 len_lo, u32 len_hi) | ||
76 | { | ||
77 | return sys_fallocate(fd, mode, ((loff_t)offset_hi << 32) | offset_lo, | ||
78 | ((loff_t)len_hi << 32) | len_lo); | ||
79 | } | ||
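Every wrapper above follows the convention described in the comment near the top of the file: a 64-bit offset or length arrives as two 32-bit registers and is reassembled with ((loff_t)high << 32) | low. A stand-alone illustration of the split and the reassembly, using an arbitrary example value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t off = 0x123456789abLL;          /* value the caller wants to pass */
            uint32_t low = (uint32_t)off;           /* low 32 bits go in one register  */
            uint32_t high = (uint32_t)(off >> 32);  /* high 32 bits go in another      */

            /* The kernel side reassembles exactly as the wrappers above do. */
            int64_t rebuilt = ((int64_t)high << 32) | low;

            printf("%#llx -> high %#x low %#x -> %#llx\n",
                   (unsigned long long)off, high, low,
                   (unsigned long long)rebuilt);
            return 0;
    }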
80 | |||
81 | |||
82 | |||
83 | long compat_sys_sched_rr_get_interval(compat_pid_t pid, | ||
84 | struct compat_timespec __user *interval) | ||
85 | { | ||
86 | struct timespec t; | ||
87 | int ret; | ||
88 | mm_segment_t old_fs = get_fs(); | ||
89 | |||
90 | set_fs(KERNEL_DS); | ||
91 | ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t); | ||
92 | set_fs(old_fs); | ||
93 | if (put_compat_timespec(&t, interval)) | ||
94 | return -EFAULT; | ||
95 | return ret; | ||
96 | } | ||
97 | |||
98 | ssize_t compat_sys_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, | ||
99 | size_t count) | ||
100 | { | ||
101 | mm_segment_t old_fs = get_fs(); | ||
102 | int ret; | ||
103 | off_t of; | ||
104 | |||
105 | if (offset && get_user(of, offset)) | ||
106 | return -EFAULT; | ||
107 | |||
108 | set_fs(KERNEL_DS); | ||
109 | ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL, | ||
110 | count); | ||
111 | set_fs(old_fs); | ||
112 | |||
113 | if (offset && put_user(of, offset)) | ||
114 | return -EFAULT; | ||
115 | return ret; | ||
116 | } | ||
117 | |||
118 | |||
119 | /* | ||
120 | * The usual compat_sys_msgsnd() and _msgrcv() seem to be assuming | ||
121 | * some different calling convention than our normal 32-bit tile code. | ||
122 | */ | ||
123 | |||
124 | /* Already defined in ipc/compat.c, but we need it here. */ | ||
125 | struct compat_msgbuf { | ||
126 | compat_long_t mtype; | ||
127 | char mtext[1]; | ||
128 | }; | ||
129 | |||
130 | long tile_compat_sys_msgsnd(int msqid, | ||
131 | struct compat_msgbuf __user *msgp, | ||
132 | size_t msgsz, int msgflg) | ||
133 | { | ||
134 | compat_long_t mtype; | ||
135 | |||
136 | if (get_user(mtype, &msgp->mtype)) | ||
137 | return -EFAULT; | ||
138 | return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg); | ||
139 | } | ||
140 | |||
141 | long tile_compat_sys_msgrcv(int msqid, | ||
142 | struct compat_msgbuf __user *msgp, | ||
143 | size_t msgsz, long msgtyp, int msgflg) | ||
144 | { | ||
145 | long err, mtype; | ||
146 | |||
147 | err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg); | ||
148 | if (err < 0) | ||
149 | goto out; | ||
150 | |||
151 | if (put_user(mtype, &msgp->mtype)) | ||
152 | err = -EFAULT; | ||
153 | out: | ||
154 | return err; | ||
155 | } | ||
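tile_compat_sys_msgsnd() and tile_compat_sys_msgrcv() above peel mtype off separately because the 32-bit struct msgbuf uses a 32-bit compat_long_t for mtype, so mtext does not sit at the same offset as in the native 64-bit layout. The toy program below only contrasts those two layouts on an LP64 host; both struct definitions are illustrative copies, not kernel headers.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct native_msgbuf_demo {     /* native 64-bit ABI: long mtype */
            long mtype;
            char mtext[1];
    };

    struct compat_msgbuf_demo {     /* 32-bit compat ABI: 32-bit mtype */
            int32_t mtype;
            char mtext[1];
    };

    int main(void)
    {
            printf("native mtext offset: %zu\n",
                   offsetof(struct native_msgbuf_demo, mtext));
            printf("compat mtext offset: %zu\n",
                   offsetof(struct compat_msgbuf_demo, mtext));
            return 0;
    }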
156 | |||
157 | /* Provide the compat syscall number to call mapping. */ | ||
158 | #undef __SYSCALL | ||
159 | #define __SYSCALL(nr, call) [nr] = (compat_##call), | ||
160 | |||
161 | /* The generic versions of these don't work for Tile. */ | ||
162 | #define compat_sys_msgrcv tile_compat_sys_msgrcv | ||
163 | #define compat_sys_msgsnd tile_compat_sys_msgsnd | ||
164 | |||
165 | /* See comments in sys.c */ | ||
166 | #define compat_sys_fadvise64 sys32_fadvise64 | ||
167 | #define compat_sys_fadvise64_64 sys32_fadvise64_64 | ||
168 | #define compat_sys_readahead sys32_readahead | ||
169 | #define compat_sys_sync_file_range compat_sys_sync_file_range2 | ||
170 | |||
171 | /* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */ | ||
172 | #define compat_sys_stat64 sys_newstat | ||
173 | #define compat_sys_lstat64 sys_newlstat | ||
174 | #define compat_sys_fstat64 sys_newfstat | ||
175 | #define compat_sys_fstatat64 sys_newfstatat | ||
176 | |||
177 | /* Pass full 64-bit values through ptrace. */ | ||
178 | #define compat_sys_ptrace tile_compat_sys_ptrace | ||
179 | |||
180 | void *compat_sys_call_table[__NR_syscalls] = { | ||
181 | [0 ... __NR_syscalls-1] = sys_ni_syscall, | ||
182 | #include <asm/unistd.h> | ||
183 | }; | ||
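compat_sys_call_table above is built from two pieces of (GCC-extension) C: a range designated initializer that points every slot at sys_ni_syscall, then one designated initializer per syscall emitted by re-including <asm/unistd.h> after redefining __SYSCALL. Later initializers for the same index simply override the default. The toy table below demonstrates the same construction; every name in it is invented for illustration.

    #include <stdio.h>

    static int op_default(void) { return -1; }
    static int op_add(void)     { return 1; }
    static int op_sub(void)     { return 2; }

    /* Mirrors "#define __SYSCALL(nr, call) [nr] = (compat_##call)," :
     * fill everything with a default, then override specific slots.
     */
    #define NUM_OPS 8
    static int (*op_table[NUM_OPS])(void) = {
            [0 ... NUM_OPS - 1] = op_default,   /* GCC range initializer */
            [2] = op_add,                       /* stands in for one __SYSCALL line */
            [5] = op_sub,
    };

    int main(void)
    {
            int i;

            for (i = 0; i < NUM_OPS; i++)
                    printf("slot %d -> %d\n", i, op_table[i]());
            return 0;
    }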
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c new file mode 100644 index 000000000000..9fa4ba8ed5f4 --- /dev/null +++ b/arch/tile/kernel/compat_signal.c | |||
@@ -0,0 +1,433 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/smp_lock.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/signal.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/wait.h> | ||
23 | #include <linux/unistd.h> | ||
24 | #include <linux/stddef.h> | ||
25 | #include <linux/personality.h> | ||
26 | #include <linux/suspend.h> | ||
27 | #include <linux/ptrace.h> | ||
28 | #include <linux/elf.h> | ||
29 | #include <linux/compat.h> | ||
30 | #include <linux/syscalls.h> | ||
31 | #include <linux/uaccess.h> | ||
32 | #include <asm/processor.h> | ||
33 | #include <asm/ucontext.h> | ||
34 | #include <asm/sigframe.h> | ||
35 | #include <arch/interrupts.h> | ||
36 | |||
37 | struct compat_sigaction { | ||
38 | compat_uptr_t sa_handler; | ||
39 | compat_ulong_t sa_flags; | ||
40 | compat_uptr_t sa_restorer; | ||
41 | sigset_t sa_mask; /* mask last for extensibility */ | ||
42 | }; | ||
43 | |||
44 | struct compat_sigaltstack { | ||
45 | compat_uptr_t ss_sp; | ||
46 | int ss_flags; | ||
47 | compat_size_t ss_size; | ||
48 | }; | ||
49 | |||
50 | struct compat_ucontext { | ||
51 | compat_ulong_t uc_flags; | ||
52 | compat_uptr_t uc_link; | ||
53 | struct compat_sigaltstack uc_stack; | ||
54 | struct sigcontext uc_mcontext; | ||
55 | sigset_t uc_sigmask; /* mask last for extensibility */ | ||
56 | }; | ||
57 | |||
58 | struct compat_siginfo { | ||
59 | int si_signo; | ||
60 | int si_errno; | ||
61 | int si_code; | ||
62 | |||
63 | union { | ||
64 | int _pad[SI_PAD_SIZE]; | ||
65 | |||
66 | /* kill() */ | ||
67 | struct { | ||
68 | unsigned int _pid; /* sender's pid */ | ||
69 | unsigned int _uid; /* sender's uid */ | ||
70 | } _kill; | ||
71 | |||
72 | /* POSIX.1b timers */ | ||
73 | struct { | ||
74 | compat_timer_t _tid; /* timer id */ | ||
75 | int _overrun; /* overrun count */ | ||
76 | compat_sigval_t _sigval; /* same as below */ | ||
77 | int _sys_private; /* not to be passed to user */ | ||
78 | int _overrun_incr; /* amount to add to overrun */ | ||
79 | } _timer; | ||
80 | |||
81 | /* POSIX.1b signals */ | ||
82 | struct { | ||
83 | unsigned int _pid; /* sender's pid */ | ||
84 | unsigned int _uid; /* sender's uid */ | ||
85 | compat_sigval_t _sigval; | ||
86 | } _rt; | ||
87 | |||
88 | /* SIGCHLD */ | ||
89 | struct { | ||
90 | unsigned int _pid; /* which child */ | ||
91 | unsigned int _uid; /* sender's uid */ | ||
92 | int _status; /* exit code */ | ||
93 | compat_clock_t _utime; | ||
94 | compat_clock_t _stime; | ||
95 | } _sigchld; | ||
96 | |||
97 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ | ||
98 | struct { | ||
99 | unsigned int _addr; /* faulting insn/memory ref. */ | ||
100 | #ifdef __ARCH_SI_TRAPNO | ||
101 | int _trapno; /* TRAP # which caused the signal */ | ||
102 | #endif | ||
103 | } _sigfault; | ||
104 | |||
105 | /* SIGPOLL */ | ||
106 | struct { | ||
107 | int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
108 | int _fd; | ||
109 | } _sigpoll; | ||
110 | } _sifields; | ||
111 | }; | ||
112 | |||
113 | struct compat_rt_sigframe { | ||
114 | unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */ | ||
115 | struct compat_siginfo info; | ||
116 | struct compat_ucontext uc; | ||
117 | }; | ||
118 | |||
119 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
120 | |||
121 | long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, | ||
122 | struct compat_sigaction __user *oact, | ||
123 | size_t sigsetsize) | ||
124 | { | ||
125 | struct k_sigaction new_sa, old_sa; | ||
126 | int ret = -EINVAL; | ||
127 | |||
128 | /* XXX: Don't preclude handling different sized sigset_t's. */ | ||
129 | if (sigsetsize != sizeof(sigset_t)) | ||
130 | goto out; | ||
131 | |||
132 | if (act) { | ||
133 | compat_uptr_t handler, restorer; | ||
134 | |||
135 | if (!access_ok(VERIFY_READ, act, sizeof(*act)) || | ||
136 | __get_user(handler, &act->sa_handler) || | ||
137 | __get_user(new_sa.sa.sa_flags, &act->sa_flags) || | ||
138 | __get_user(restorer, &act->sa_restorer) || | ||
139 | __copy_from_user(&new_sa.sa.sa_mask, &act->sa_mask, | ||
140 | sizeof(sigset_t))) | ||
141 | return -EFAULT; | ||
142 | new_sa.sa.sa_handler = compat_ptr(handler); | ||
143 | new_sa.sa.sa_restorer = compat_ptr(restorer); | ||
144 | } | ||
145 | |||
146 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); | ||
147 | |||
148 | if (!ret && oact) { | ||
149 | if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || | ||
150 | __put_user(ptr_to_compat(old_sa.sa.sa_handler), | ||
151 | &oact->sa_handler) || | ||
152 | __put_user(ptr_to_compat(old_sa.sa.sa_restorer), | ||
153 | &oact->sa_restorer) || | ||
154 | __put_user(old_sa.sa.sa_flags, &oact->sa_flags) || | ||
155 | __copy_to_user(&oact->sa_mask, &old_sa.sa.sa_mask, | ||
156 | sizeof(sigset_t))) | ||
157 | return -EFAULT; | ||
158 | } | ||
159 | out: | ||
160 | return ret; | ||
161 | } | ||
162 | |||
163 | long compat_sys_rt_sigqueueinfo(int pid, int sig, | ||
164 | struct compat_siginfo __user *uinfo) | ||
165 | { | ||
166 | siginfo_t info; | ||
167 | int ret; | ||
168 | mm_segment_t old_fs = get_fs(); | ||
169 | |||
170 | if (copy_siginfo_from_user32(&info, uinfo)) | ||
171 | return -EFAULT; | ||
172 | set_fs(KERNEL_DS); | ||
173 | ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info); | ||
174 | set_fs(old_fs); | ||
175 | return ret; | ||
176 | } | ||
177 | |||
178 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from) | ||
179 | { | ||
180 | int err; | ||
181 | |||
182 | if (!access_ok(VERIFY_WRITE, to, sizeof(struct compat_siginfo))) | ||
183 | return -EFAULT; | ||
184 | |||
185 | /* If you change the siginfo_t structure, please make sure that | ||
186 | this code is fixed accordingly. | ||
187 | It should never copy any pad contained in the structure | ||
188 | to avoid security leaks, but must copy the generic | ||
189 | 3 ints plus the relevant union member. */ | ||
190 | err = __put_user(from->si_signo, &to->si_signo); | ||
191 | err |= __put_user(from->si_errno, &to->si_errno); | ||
192 | err |= __put_user((short)from->si_code, &to->si_code); | ||
193 | |||
194 | if (from->si_code < 0) { | ||
195 | err |= __put_user(from->si_pid, &to->si_pid); | ||
196 | err |= __put_user(from->si_uid, &to->si_uid); | ||
197 | err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr); | ||
198 | } else { | ||
199 | /* | ||
200 | * First 32 bits of unions are always present: | ||
201 | * si_pid === si_band === si_tid === si_addr(LS half) | ||
202 | */ | ||
203 | err |= __put_user(from->_sifields._pad[0], | ||
204 | &to->_sifields._pad[0]); | ||
205 | switch (from->si_code >> 16) { | ||
206 | case __SI_FAULT >> 16: | ||
207 | break; | ||
208 | case __SI_CHLD >> 16: | ||
209 | err |= __put_user(from->si_utime, &to->si_utime); | ||
210 | err |= __put_user(from->si_stime, &to->si_stime); | ||
211 | err |= __put_user(from->si_status, &to->si_status); | ||
212 | /* FALL THROUGH */ | ||
213 | default: | ||
214 | case __SI_KILL >> 16: | ||
215 | err |= __put_user(from->si_uid, &to->si_uid); | ||
216 | break; | ||
217 | case __SI_POLL >> 16: | ||
218 | err |= __put_user(from->si_fd, &to->si_fd); | ||
219 | break; | ||
220 | case __SI_TIMER >> 16: | ||
221 | err |= __put_user(from->si_overrun, &to->si_overrun); | ||
222 | err |= __put_user(ptr_to_compat(from->si_ptr), | ||
223 | &to->si_ptr); | ||
224 | break; | ||
225 | /* This is not generated by the kernel as of now. */ | ||
226 | case __SI_RT >> 16: | ||
227 | case __SI_MESGQ >> 16: | ||
228 | err |= __put_user(from->si_uid, &to->si_uid); | ||
229 | err |= __put_user(from->si_int, &to->si_int); | ||
230 | break; | ||
231 | } | ||
232 | } | ||
233 | return err; | ||
234 | } | ||
235 | |||
236 | int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) | ||
237 | { | ||
238 | int err; | ||
239 | u32 ptr32; | ||
240 | |||
241 | if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) | ||
242 | return -EFAULT; | ||
243 | |||
244 | err = __get_user(to->si_signo, &from->si_signo); | ||
245 | err |= __get_user(to->si_errno, &from->si_errno); | ||
246 | err |= __get_user(to->si_code, &from->si_code); | ||
247 | |||
248 | err |= __get_user(to->si_pid, &from->si_pid); | ||
249 | err |= __get_user(to->si_uid, &from->si_uid); | ||
250 | err |= __get_user(ptr32, &from->si_ptr); | ||
251 | to->si_ptr = compat_ptr(ptr32); | ||
252 | |||
253 | return err; | ||
254 | } | ||
255 | |||
256 | long _compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, | ||
257 | struct compat_sigaltstack __user *uoss_ptr, | ||
258 | struct pt_regs *regs) | ||
259 | { | ||
260 | stack_t uss, uoss; | ||
261 | int ret; | ||
262 | mm_segment_t seg; | ||
263 | |||
264 | if (uss_ptr) { | ||
265 | u32 ptr; | ||
266 | |||
267 | memset(&uss, 0, sizeof(stack_t)); | ||
268 | if (!access_ok(VERIFY_READ, uss_ptr, sizeof(*uss_ptr)) || | ||
269 | __get_user(ptr, &uss_ptr->ss_sp) || | ||
270 | __get_user(uss.ss_flags, &uss_ptr->ss_flags) || | ||
271 | __get_user(uss.ss_size, &uss_ptr->ss_size)) | ||
272 | return -EFAULT; | ||
273 | uss.ss_sp = compat_ptr(ptr); | ||
274 | } | ||
275 | seg = get_fs(); | ||
276 | set_fs(KERNEL_DS); | ||
277 | ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, | ||
278 | (unsigned long)compat_ptr(regs->sp)); | ||
279 | set_fs(seg); | ||
280 | if (ret >= 0 && uoss_ptr) { | ||
281 | if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(*uoss_ptr)) || | ||
282 | __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || | ||
283 | __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) || | ||
284 | __put_user(uoss.ss_size, &uoss_ptr->ss_size)) | ||
285 | ret = -EFAULT; | ||
286 | } | ||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | long _compat_sys_rt_sigreturn(struct pt_regs *regs) | ||
291 | { | ||
292 | struct compat_rt_sigframe __user *frame = | ||
293 | (struct compat_rt_sigframe __user *) compat_ptr(regs->sp); | ||
294 | sigset_t set; | ||
295 | long r0; | ||
296 | |||
297 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
298 | goto badframe; | ||
299 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
300 | goto badframe; | ||
301 | |||
302 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
303 | spin_lock_irq(¤t->sighand->siglock); | ||
304 | current->blocked = set; | ||
305 | recalc_sigpending(); | ||
306 | spin_unlock_irq(¤t->sighand->siglock); | ||
307 | |||
308 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) | ||
309 | goto badframe; | ||
310 | |||
311 | if (_compat_sys_sigaltstack(&frame->uc.uc_stack, NULL, regs) != 0) | ||
312 | goto badframe; | ||
313 | |||
314 | return r0; | ||
315 | |||
316 | badframe: | ||
317 | force_sig(SIGSEGV, current); | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * Determine which stack to use. | ||
323 | */ | ||
324 | static inline void __user *compat_get_sigframe(struct k_sigaction *ka, | ||
325 | struct pt_regs *regs, | ||
326 | size_t frame_size) | ||
327 | { | ||
328 | unsigned long sp; | ||
329 | |||
330 | /* Default to using normal stack */ | ||
331 | sp = (unsigned long)compat_ptr(regs->sp); | ||
332 | |||
333 | /* | ||
334 | * If we are on the alternate signal stack and would overflow | ||
335 | * it, don't. Return an always-bogus address instead so we | ||
336 | * will die with SIGSEGV. | ||
337 | */ | ||
338 | if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) | ||
339 | return (void __user *) -1L; | ||
340 | |||
341 | /* This is the X/Open sanctioned signal stack switching. */ | ||
342 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
343 | if (sas_ss_flags(sp) == 0) | ||
344 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
345 | } | ||
346 | |||
347 | sp -= frame_size; | ||
348 | /* | ||
349 | * Align the stack pointer according to the TILE ABI, | ||
350 | * i.e. so that on function entry (sp & 15) == 0. | ||
351 | */ | ||
352 | sp &= -16UL; | ||
353 | return (void __user *) sp; | ||
354 | } | ||
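The tail of compat_get_sigframe() above subtracts the frame size and then ANDs sp with -16, whose two's-complement bit pattern is ...fff0, so the low four bits are cleared and (sp & 15) == 0 on handler entry as the comment requires. A two-line check, purely for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long sp = 0xbfff1234UL;   /* arbitrary example stack pointer */

            sp -= 200;                         /* pretend frame_size == 200 */
            sp &= -16UL;                       /* round down to a 16-byte boundary */
            printf("sp = %#lx, sp %% 16 = %lu\n", sp, sp % 16);
            return 0;
    }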
355 | |||
356 | int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
357 | sigset_t *set, struct pt_regs *regs) | ||
358 | { | ||
359 | unsigned long restorer; | ||
360 | struct compat_rt_sigframe __user *frame; | ||
361 | int err = 0; | ||
362 | int usig; | ||
363 | |||
364 | frame = compat_get_sigframe(ka, regs, sizeof(*frame)); | ||
365 | |||
366 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
367 | goto give_sigsegv; | ||
368 | |||
369 | usig = current_thread_info()->exec_domain | ||
370 | && current_thread_info()->exec_domain->signal_invmap | ||
371 | && sig < 32 | ||
372 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
373 | : sig; | ||
374 | |||
375 | /* Always write at least the signal number for the stack backtracer. */ | ||
376 | if (ka->sa.sa_flags & SA_SIGINFO) { | ||
377 | /* At sigreturn time, restore the callee-save registers too. */ | ||
378 | err |= copy_siginfo_to_user32(&frame->info, info); | ||
379 | regs->flags |= PT_FLAGS_RESTORE_REGS; | ||
380 | } else { | ||
381 | err |= __put_user(info->si_signo, &frame->info.si_signo); | ||
382 | } | ||
383 | |||
384 | /* Create the ucontext. */ | ||
385 | err |= __clear_user(&frame->save_area, sizeof(frame->save_area)); | ||
386 | err |= __put_user(0, &frame->uc.uc_flags); | ||
387 | err |= __put_user(0, &frame->uc.uc_link); | ||
388 | err |= __put_user(ptr_to_compat((void *)(current->sas_ss_sp)), | ||
389 | &frame->uc.uc_stack.ss_sp); | ||
390 | err |= __put_user(sas_ss_flags(regs->sp), | ||
391 | &frame->uc.uc_stack.ss_flags); | ||
392 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
393 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); | ||
394 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
395 | if (err) | ||
396 | goto give_sigsegv; | ||
397 | |||
398 | restorer = VDSO_BASE; | ||
399 | if (ka->sa.sa_flags & SA_RESTORER) | ||
400 | restorer = ptr_to_compat_reg(ka->sa.sa_restorer); | ||
401 | |||
402 | /* | ||
403 | * Set up registers for signal handler. | ||
404 | * Registers that we don't modify keep the value they had from | ||
405 | * user-space at the time we took the signal. | ||
406 | */ | ||
407 | regs->pc = ptr_to_compat_reg(ka->sa.sa_handler); | ||
408 | regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ | ||
409 | regs->sp = ptr_to_compat_reg(frame); | ||
410 | regs->lr = restorer; | ||
411 | regs->regs[0] = (unsigned long) usig; | ||
412 | |||
413 | if (ka->sa.sa_flags & SA_SIGINFO) { | ||
414 | /* Need extra arguments, so mark to restore caller-saves. */ | ||
415 | regs->regs[1] = ptr_to_compat_reg(&frame->info); | ||
416 | regs->regs[2] = ptr_to_compat_reg(&frame->uc); | ||
417 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
418 | } | ||
419 | |||
420 | /* | ||
421 | * Notify any tracer that was single-stepping it. | ||
422 | * The tracer may want to single-step inside the | ||
423 | * handler too. | ||
424 | */ | ||
425 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
426 | ptrace_notify(SIGTRAP); | ||
427 | |||
428 | return 0; | ||
429 | |||
430 | give_sigsegv: | ||
431 | force_sigsegv(sig, current); | ||
432 | return -EFAULT; | ||
433 | } | ||
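Throughout compat_signal.c, 32-bit user pointers arrive as compat_uptr_t and are widened with compat_ptr(), while kernel-held pointers headed back into the 32-bit frame are narrowed with ptr_to_compat(). The stand-alone toy below captures that round trip; compat_uptr_t and the two helpers here are simplified stand-ins, not the kernel's definitions (which also carry the __user annotation).

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t compat_uptr_t;                 /* stand-in for the kernel type */

    static void *demo_compat_ptr(compat_uptr_t uptr)    /* widen 32-bit -> native */
    {
            return (void *)(uintptr_t)uptr;
    }

    static compat_uptr_t demo_ptr_to_compat(void *ptr)  /* narrow native -> 32-bit */
    {
            return (compat_uptr_t)(uintptr_t)ptr;
    }

    int main(void)
    {
            compat_uptr_t user_handle = 0x10008000u;    /* a 32-bit user address */
            void *p = demo_compat_ptr(user_handle);

            printf("widened:  %p\n", p);
            printf("narrowed: %#x\n", demo_ptr_to_compat(p));
            return 0;
    }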
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c new file mode 100644 index 000000000000..e44d441e3f3f --- /dev/null +++ b/arch/tile/kernel/early_printk.c | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/console.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <asm/setup.h> | ||
20 | #include <hv/hypervisor.h> | ||
21 | |||
22 | static void early_hv_write(struct console *con, const char *s, unsigned n) | ||
23 | { | ||
24 | hv_console_write((HV_VirtAddr) s, n); | ||
25 | } | ||
26 | |||
27 | static struct console early_hv_console = { | ||
28 | .name = "earlyhv", | ||
29 | .write = early_hv_write, | ||
30 | .flags = CON_PRINTBUFFER, | ||
31 | .index = -1, | ||
32 | }; | ||
33 | |||
34 | /* Direct interface for emergencies */ | ||
35 | struct console *early_console = &early_hv_console; | ||
36 | static int early_console_initialized; | ||
37 | static int early_console_complete; | ||
38 | |||
39 | static void early_vprintk(const char *fmt, va_list ap) | ||
40 | { | ||
41 | char buf[512]; | ||
42 | int n = vscnprintf(buf, sizeof(buf), fmt, ap); | ||
43 | early_console->write(early_console, buf, n); | ||
44 | } | ||
45 | |||
46 | void early_printk(const char *fmt, ...) | ||
47 | { | ||
48 | va_list ap; | ||
49 | va_start(ap, fmt); | ||
50 | early_vprintk(fmt, ap); | ||
51 | va_end(ap); | ||
52 | } | ||
53 | |||
54 | void early_panic(const char *fmt, ...) | ||
55 | { | ||
56 | va_list ap; | ||
57 | raw_local_irq_disable_all(); | ||
58 | va_start(ap, fmt); | ||
59 | early_printk("Kernel panic - not syncing: "); | ||
60 | early_vprintk(fmt, ap); | ||
61 | early_console->write(early_console, "\n", 1); | ||
62 | va_end(ap); | ||
63 | dump_stack(); | ||
64 | hv_halt(); | ||
65 | } | ||
66 | |||
67 | static int __initdata keep_early; | ||
68 | |||
69 | static int __init setup_early_printk(char *str) | ||
70 | { | ||
71 | if (early_console_initialized) | ||
72 | return 1; | ||
73 | |||
74 | if (str != NULL && strncmp(str, "keep", 4) == 0) | ||
75 | keep_early = 1; | ||
76 | |||
77 | early_console = &early_hv_console; | ||
78 | early_console_initialized = 1; | ||
79 | register_console(early_console); | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | void __init disable_early_printk(void) | ||
85 | { | ||
86 | early_console_complete = 1; | ||
87 | if (!early_console_initialized || !early_console) | ||
88 | return; | ||
89 | if (!keep_early) { | ||
90 | early_printk("disabling early console\n"); | ||
91 | unregister_console(early_console); | ||
92 | early_console_initialized = 0; | ||
93 | } else { | ||
94 | early_printk("keeping early console\n"); | ||
95 | } | ||
96 | } | ||
97 | |||
98 | void warn_early_printk(void) | ||
99 | { | ||
100 | if (early_console_complete || early_console_initialized) | ||
101 | return; | ||
102 | early_printk("\ | ||
103 | Machine shutting down before console output is fully initialized.\n\ | ||
104 | You may wish to reboot and add the option 'earlyprintk' to your\n\ | ||
105 | boot command line to see any diagnostic early console output.\n\ | ||
106 | "); | ||
107 | } | ||
108 | |||
109 | early_param("earlyprintk", setup_early_printk); | ||
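early_vprintk() above formats into a fixed 512-byte buffer with vscnprintf() and hands the resulting byte count straight to the console's write hook, which for this early console is hv_console_write(). The user-space sketch below mirrors that pattern with vsnprintf() and a stand-in write function; everything in it is illustrative.

    #include <stdarg.h>
    #include <stdio.h>

    /* Stand-in for early_console->write(): dump exactly n bytes. */
    static void demo_console_write(const char *s, unsigned int n)
    {
            fwrite(s, 1, n, stdout);
    }

    static void demo_early_vprintk(const char *fmt, va_list ap)
    {
            char buf[512];
            int n = vsnprintf(buf, sizeof(buf), fmt, ap);

            if (n < 0)
                    return;
            if (n > (int)sizeof(buf) - 1)   /* unlike vscnprintf, vsnprintf can */
                    n = sizeof(buf) - 1;    /* report more than it stored       */
            demo_console_write(buf, n);
    }

    static void demo_early_printk(const char *fmt, ...)
    {
            va_list ap;

            va_start(ap, fmt);
            demo_early_vprintk(fmt, ap);
            va_end(ap);
    }

    int main(void)
    {
            demo_early_printk("early console says: %s %d\n", "hello", 42);
            return 0;
    }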
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S new file mode 100644 index 000000000000..136261f7d7f9 --- /dev/null +++ b/arch/tile/kernel/entry.S | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <arch/abi.h> | ||
17 | #include <asm/unistd.h> | ||
18 | #include <asm/irqflags.h> | ||
19 | |||
20 | #ifdef __tilegx__ | ||
21 | #define bnzt bnezt | ||
22 | #endif | ||
23 | |||
24 | STD_ENTRY(current_text_addr) | ||
25 | { move r0, lr; jrp lr } | ||
26 | STD_ENDPROC(current_text_addr) | ||
27 | |||
28 | STD_ENTRY(_sim_syscall) | ||
29 | /* | ||
30 | * Wait for r0-r9 to be ready (and lr on the off chance we | ||
31 | * want the syscall to locate its caller), then make a magic | ||
32 | * simulator syscall. | ||
33 | * | ||
34 | * We carefully stall until the registers are readable in case they | ||
35 | * are the target of a slow load, etc. so that tile-sim will | ||
36 | * definitely be able to read all of them inside the magic syscall. | ||
37 | * | ||
38 | * Technically this is wrong for r3-r9 and lr, since an interrupt | ||
39 | * could come in and restore the registers with a slow load right | ||
40 | * before executing the mtspr. We may need to modify tile-sim to | ||
41 | * explicitly stall for this case, but we do not yet have | ||
42 | * a way to implement such a stall. | ||
43 | */ | ||
44 | { and zero, lr, r9 ; and zero, r8, r7 } | ||
45 | { and zero, r6, r5 ; and zero, r4, r3 } | ||
46 | { and zero, r2, r1 ; mtspr SIM_CONTROL, r0 } | ||
47 | { jrp lr } | ||
48 | STD_ENDPROC(_sim_syscall) | ||
49 | |||
50 | /* | ||
51 | * Implement execve(). The i386 code has a note that forking from kernel | ||
52 | * space results in no copy on write until the execve, so we should be | ||
53 | * careful not to write to the stack here. | ||
54 | */ | ||
55 | STD_ENTRY(kernel_execve) | ||
56 | moveli TREG_SYSCALL_NR_NAME, __NR_execve | ||
57 | swint1 | ||
58 | jrp lr | ||
59 | STD_ENDPROC(kernel_execve) | ||
60 | |||
61 | /* Delay a fixed number of cycles. */ | ||
62 | STD_ENTRY(__delay) | ||
63 | { addi r0, r0, -1; bnzt r0, . } | ||
64 | jrp lr | ||
65 | STD_ENDPROC(__delay) | ||
66 | |||
67 | /* | ||
68 | * We don't run this function directly, but instead copy it to a page | ||
69 | * we map into every user process. See vdso_setup(). | ||
70 | * | ||
71 | * Note that libc has a copy of this function that it uses to compare | ||
72 | * against the PC when a stack backtrace ends, so if this code is | ||
73 | * changed, the libc implementation(s) should also be updated. | ||
74 | */ | ||
75 | .pushsection .data | ||
76 | ENTRY(__rt_sigreturn) | ||
77 | moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn | ||
78 | swint1 | ||
79 | ENDPROC(__rt_sigreturn) | ||
80 | ENTRY(__rt_sigreturn_end) | ||
81 | .popsection | ||
82 | |||
83 | STD_ENTRY(dump_stack) | ||
84 | { move r2, lr; lnk r1 } | ||
85 | { move r4, r52; addli r1, r1, dump_stack - . } | ||
86 | { move r3, sp; j _dump_stack } | ||
87 | jrp lr /* keep backtracer happy */ | ||
88 | STD_ENDPROC(dump_stack) | ||
89 | |||
90 | STD_ENTRY(KBacktraceIterator_init_current) | ||
91 | { move r2, lr; lnk r1 } | ||
92 | { move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . } | ||
93 | { move r3, sp; j _KBacktraceIterator_init_current } | ||
94 | jrp lr /* keep backtracer happy */ | ||
95 | STD_ENDPROC(KBacktraceIterator_init_current) | ||
96 | |||
97 | /* | ||
98 | * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then | ||
99 | * free the old stack (passed in r0) and re-invoke cpu_idle(). | ||
100 | * We update sp and ksp0 simultaneously to avoid backtracer warnings. | ||
101 | */ | ||
102 | STD_ENTRY(cpu_idle_on_new_stack) | ||
103 | { | ||
104 | move sp, r1 | ||
105 | mtspr SYSTEM_SAVE_1_0, r2 | ||
106 | } | ||
107 | jal free_thread_info | ||
108 | j cpu_idle | ||
109 | STD_ENDPROC(cpu_idle_on_new_stack) | ||
110 | |||
111 | /* Loop forever on a nap during SMP boot. */ | ||
112 | STD_ENTRY(smp_nap) | ||
113 | nap | ||
114 | j smp_nap /* we are not architecturally guaranteed not to exit nap */ | ||
115 | jrp lr /* clue in the backtracer */ | ||
116 | STD_ENDPROC(smp_nap) | ||
117 | |||
118 | /* | ||
119 | * Enable interrupts racelessly and then nap until interrupted. | ||
120 | * This function's _cpu_idle_nap address is special; see intvec.S. | ||
121 | * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and | ||
122 | * as a result return to the function that called _cpu_idle(). | ||
123 | */ | ||
124 | STD_ENTRY(_cpu_idle) | ||
125 | { | ||
126 | lnk r0 | ||
127 | movei r1, 1 | ||
128 | } | ||
129 | { | ||
130 | addli r0, r0, _cpu_idle_nap - . | ||
131 | mtspr INTERRUPT_CRITICAL_SECTION, r1 | ||
132 | } | ||
133 | IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */ | ||
134 | mtspr EX_CONTEXT_1_1, r1 /* PL1, ICS clear */ | ||
135 | mtspr EX_CONTEXT_1_0, r0 | ||
136 | iret | ||
137 | .global _cpu_idle_nap | ||
138 | _cpu_idle_nap: | ||
139 | nap | ||
140 | jrp lr | ||
141 | STD_ENDPROC(_cpu_idle) | ||
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S new file mode 100644 index 000000000000..2b4f6c091701 --- /dev/null +++ b/arch/tile/kernel/head_32.S | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * TILE startup code. | ||
15 | */ | ||
16 | |||
17 | #include <linux/linkage.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/pgtable.h> | ||
21 | #include <asm/thread_info.h> | ||
22 | #include <asm/processor.h> | ||
23 | #include <asm/asm-offsets.h> | ||
24 | #include <hv/hypervisor.h> | ||
25 | #include <arch/chip.h> | ||
26 | |||
27 | /* | ||
28 | * This module contains the entry code for kernel images. It performs the | ||
29 | * minimal setup needed to call the generic C routines. | ||
30 | */ | ||
31 | |||
32 | __HEAD | ||
33 | ENTRY(_start) | ||
34 | /* Notify the hypervisor of what version of the API we want */ | ||
35 | { | ||
36 | movei r1, TILE_CHIP | ||
37 | movei r2, TILE_CHIP_REV | ||
38 | } | ||
39 | { | ||
40 | moveli r0, _HV_VERSION | ||
41 | jal hv_init | ||
42 | } | ||
43 | /* Get a reasonable default ASID in r0 */ | ||
44 | { | ||
45 | move r0, zero | ||
46 | jal hv_inquire_asid | ||
47 | } | ||
48 | /* Install the default page table */ | ||
49 | { | ||
50 | moveli r6, lo16(swapper_pgprot - PAGE_OFFSET) | ||
51 | move r4, r0 /* use starting ASID of range for this page table */ | ||
52 | } | ||
53 | { | ||
54 | moveli r0, lo16(swapper_pg_dir - PAGE_OFFSET) | ||
55 | auli r6, r6, ha16(swapper_pgprot - PAGE_OFFSET) | ||
56 | } | ||
57 | { | ||
58 | lw r2, r6 | ||
59 | addi r6, r6, 4 | ||
60 | } | ||
61 | { | ||
62 | lw r3, r6 | ||
63 | auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET) | ||
64 | } | ||
65 | { | ||
66 | inv r6 | ||
67 | move r1, zero /* high 32 bits of CPA is zero */ | ||
68 | } | ||
69 | { | ||
70 | moveli lr, lo16(1f) | ||
71 | move r5, zero | ||
72 | } | ||
73 | { | ||
74 | auli lr, lr, ha16(1f) | ||
75 | j hv_install_context | ||
76 | } | ||
77 | 1: | ||
78 | |||
79 | /* Get our processor number and save it away in SAVE_1_0. */ | ||
80 | jal hv_inquire_topology | ||
81 | mulll_uu r4, r1, r2 /* r1 == y, r2 == width */ | ||
82 | add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */ | ||
83 | |||
84 | #ifdef CONFIG_SMP | ||
85 | /* | ||
86 | * Load up our per-cpu offset. When the first (master) tile | ||
87 | * boots, this value is still zero, so we will load boot_pc | ||
88 | * with start_kernel, and boot_sp with init_stack + THREAD_SIZE. | ||
89 | * The master tile initializes the per-cpu offset array, so that | ||
90 | * when subsequent (secondary) tiles boot, they will instead load | ||
91 | * from their per-cpu versions of boot_sp and boot_pc. | ||
92 | */ | ||
93 | moveli r5, lo16(__per_cpu_offset) | ||
94 | auli r5, r5, ha16(__per_cpu_offset) | ||
95 | s2a r5, r4, r5 | ||
96 | lw r5, r5 | ||
97 | bnz r5, 1f | ||
98 | |||
99 | /* | ||
100 | * Save the width and height to the smp_topology variable | ||
101 | * for later use. | ||
102 | */ | ||
103 | moveli r0, lo16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET) | ||
104 | auli r0, r0, ha16(smp_topology + HV_TOPOLOGY_WIDTH_OFFSET) | ||
105 | { | ||
106 | sw r0, r2 | ||
107 | addi r0, r0, (HV_TOPOLOGY_HEIGHT_OFFSET - HV_TOPOLOGY_WIDTH_OFFSET) | ||
108 | } | ||
109 | sw r0, r3 | ||
110 | 1: | ||
111 | #else | ||
112 | move r5, zero | ||
113 | #endif | ||
114 | |||
115 | /* Load and go with the correct pc and sp. */ | ||
116 | { | ||
117 | addli r1, r5, lo16(boot_sp) | ||
118 | addli r0, r5, lo16(boot_pc) | ||
119 | } | ||
120 | { | ||
121 | auli r1, r1, ha16(boot_sp) | ||
122 | auli r0, r0, ha16(boot_pc) | ||
123 | } | ||
124 | lw r0, r0 | ||
125 | lw sp, r1 | ||
126 | or r4, sp, r4 | ||
127 | mtspr SYSTEM_SAVE_1_0, r4 /* save ksp0 + cpu */ | ||
128 | addi sp, sp, -STACK_TOP_DELTA | ||
129 | { | ||
130 | move lr, zero /* stop backtraces in the called function */ | ||
131 | jr r0 | ||
132 | } | ||
133 | ENDPROC(_start) | ||
134 | |||
135 | .section ".bss.page_aligned","w" | ||
136 | .align PAGE_SIZE | ||
137 | ENTRY(empty_zero_page) | ||
138 | .fill PAGE_SIZE,1,0 | ||
139 | END(empty_zero_page) | ||
140 | |||
141 | .macro PTE va, cpa, bits1, no_org=0 | ||
142 | .ifeq \no_org | ||
143 | .org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE | ||
144 | .endif | ||
145 | .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \ | ||
146 | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) | ||
147 | .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN) | ||
148 | .endm | ||
149 | |||
150 | .section ".data.page_aligned","wa" | ||
151 | .align PAGE_SIZE | ||
152 | ENTRY(swapper_pg_dir) | ||
153 | /* | ||
154 | * All data pages from PAGE_OFFSET to MEM_USER_INTRPT are mapped as | ||
155 | * VA = PA + PAGE_OFFSET. We remap things with more precise access | ||
156 | * permissions and more respect for size of RAM later. | ||
157 | */ | ||
158 | .set addr, 0 | ||
159 | .rept (MEM_USER_INTRPT - PAGE_OFFSET) >> PGDIR_SHIFT | ||
160 | PTE addr + PAGE_OFFSET, addr, HV_PTE_READABLE | HV_PTE_WRITABLE | ||
161 | .set addr, addr + PGDIR_SIZE | ||
162 | .endr | ||
163 | |||
164 | /* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */ | ||
165 | PTE MEM_SV_INTRPT, 0, HV_PTE_READABLE | HV_PTE_EXECUTABLE | ||
166 | .org swapper_pg_dir + HV_L1_SIZE | ||
167 | END(swapper_pg_dir) | ||
168 | |||
169 | /* | ||
170 | * Isolate swapper_pgprot to its own cache line, since each cpu | ||
171 | * starting up will read it using VA-is-PA and local homing. | ||
172 | * This would otherwise likely conflict with other data on the cache | ||
173 | * line, once we have set its permanent home in the page tables. | ||
174 | */ | ||
175 | __INITDATA | ||
176 | .align CHIP_L2_LINE_SIZE() | ||
177 | ENTRY(swapper_pgprot) | ||
178 | PTE 0, 0, HV_PTE_READABLE | HV_PTE_WRITABLE, 1 | ||
179 | .align CHIP_L2_LINE_SIZE() | ||
180 | END(swapper_pgprot) | ||
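Early in _start above, the boot code turns the tile's (x, y) grid coordinates from hv_inquire_topology() into a linear CPU number with a multiply and an add: r4 = y * width + x. The same arithmetic in C, with made-up coordinates:

    #include <stdio.h>

    int main(void)
    {
            unsigned int x = 3, y = 2, width = 8;   /* example 8-tile-wide grid */
            unsigned int cpu = y * width + x;       /* matches the mulll_uu + add pair */

            printf("tile (%u,%u) on a %u-wide grid is cpu %u\n", x, y, width, cpu);
            return 0;
    }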
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds new file mode 100644 index 000000000000..698489b4c7ab --- /dev/null +++ b/arch/tile/kernel/hvglue.lds | |||
@@ -0,0 +1,56 @@ | |||
1 | /* Hypervisor call vector addresses; see <hv/hypervisor.h> */ | ||
2 | hv_init = TEXT_OFFSET + 0x10020; | ||
3 | hv_install_context = TEXT_OFFSET + 0x10040; | ||
4 | hv_sysconf = TEXT_OFFSET + 0x10060; | ||
5 | hv_get_rtc = TEXT_OFFSET + 0x10080; | ||
6 | hv_set_rtc = TEXT_OFFSET + 0x100a0; | ||
7 | hv_flush_asid = TEXT_OFFSET + 0x100c0; | ||
8 | hv_flush_page = TEXT_OFFSET + 0x100e0; | ||
9 | hv_flush_pages = TEXT_OFFSET + 0x10100; | ||
10 | hv_restart = TEXT_OFFSET + 0x10120; | ||
11 | hv_halt = TEXT_OFFSET + 0x10140; | ||
12 | hv_power_off = TEXT_OFFSET + 0x10160; | ||
13 | hv_inquire_physical = TEXT_OFFSET + 0x10180; | ||
14 | hv_inquire_memory_controller = TEXT_OFFSET + 0x101a0; | ||
15 | hv_inquire_virtual = TEXT_OFFSET + 0x101c0; | ||
16 | hv_inquire_asid = TEXT_OFFSET + 0x101e0; | ||
17 | hv_nanosleep = TEXT_OFFSET + 0x10200; | ||
18 | hv_console_read_if_ready = TEXT_OFFSET + 0x10220; | ||
19 | hv_console_write = TEXT_OFFSET + 0x10240; | ||
20 | hv_downcall_dispatch = TEXT_OFFSET + 0x10260; | ||
21 | hv_inquire_topology = TEXT_OFFSET + 0x10280; | ||
22 | hv_fs_findfile = TEXT_OFFSET + 0x102a0; | ||
23 | hv_fs_fstat = TEXT_OFFSET + 0x102c0; | ||
24 | hv_fs_pread = TEXT_OFFSET + 0x102e0; | ||
25 | hv_physaddr_read64 = TEXT_OFFSET + 0x10300; | ||
26 | hv_physaddr_write64 = TEXT_OFFSET + 0x10320; | ||
27 | hv_get_command_line = TEXT_OFFSET + 0x10340; | ||
28 | hv_set_caching = TEXT_OFFSET + 0x10360; | ||
29 | hv_bzero_page = TEXT_OFFSET + 0x10380; | ||
30 | hv_register_message_state = TEXT_OFFSET + 0x103a0; | ||
31 | hv_send_message = TEXT_OFFSET + 0x103c0; | ||
32 | hv_receive_message = TEXT_OFFSET + 0x103e0; | ||
33 | hv_inquire_context = TEXT_OFFSET + 0x10400; | ||
34 | hv_start_all_tiles = TEXT_OFFSET + 0x10420; | ||
35 | hv_dev_open = TEXT_OFFSET + 0x10440; | ||
36 | hv_dev_close = TEXT_OFFSET + 0x10460; | ||
37 | hv_dev_pread = TEXT_OFFSET + 0x10480; | ||
38 | hv_dev_pwrite = TEXT_OFFSET + 0x104a0; | ||
39 | hv_dev_poll = TEXT_OFFSET + 0x104c0; | ||
40 | hv_dev_poll_cancel = TEXT_OFFSET + 0x104e0; | ||
41 | hv_dev_preada = TEXT_OFFSET + 0x10500; | ||
42 | hv_dev_pwritea = TEXT_OFFSET + 0x10520; | ||
43 | hv_flush_remote = TEXT_OFFSET + 0x10540; | ||
44 | hv_console_putc = TEXT_OFFSET + 0x10560; | ||
45 | hv_inquire_tiles = TEXT_OFFSET + 0x10580; | ||
46 | hv_confstr = TEXT_OFFSET + 0x105a0; | ||
47 | hv_reexec = TEXT_OFFSET + 0x105c0; | ||
48 | hv_set_command_line = TEXT_OFFSET + 0x105e0; | ||
49 | hv_dev_register_intr_state = TEXT_OFFSET + 0x10600; | ||
50 | hv_enable_intr = TEXT_OFFSET + 0x10620; | ||
51 | hv_disable_intr = TEXT_OFFSET + 0x10640; | ||
52 | hv_trigger_ipi = TEXT_OFFSET + 0x10660; | ||
53 | hv_store_mapping = TEXT_OFFSET + 0x10680; | ||
54 | hv_inquire_realpa = TEXT_OFFSET + 0x106a0; | ||
55 | hv_flush_all = TEXT_OFFSET + 0x106c0; | ||
56 | hv_glue_internals = TEXT_OFFSET + 0x106e0; | ||
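The glue script above is simply a table of fixed entry points: the first hypervisor call stub (hv_init) sits at TEXT_OFFSET + 0x10020 and each subsequent stub is 0x20 bytes later. A quick arithmetic check of two of the listed offsets, for illustration only:

    #include <stdio.h>

    int main(void)
    {
            unsigned long base = 0x10020, stride = 0x20;

            /* Counting hv_init as slot 0: slot 1 should be hv_install_context
             * (0x10040) and slot 52 should be hv_inquire_realpa (0x106a0).
             */
            printf("slot 1:  %#lx\n", base + 1 * stride);
            printf("slot 52: %#lx\n", base + 52 * stride);
            return 0;
    }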
diff --git a/arch/tile/kernel/init_task.c b/arch/tile/kernel/init_task.c new file mode 100644 index 000000000000..928b31870669 --- /dev/null +++ b/arch/tile/kernel/init_task.c | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <linux/fs.h> | ||
17 | #include <linux/init_task.h> | ||
18 | #include <linux/mqueue.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/start_kernel.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | |||
23 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | ||
24 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | ||
25 | |||
26 | /* | ||
27 | * Initial thread structure. | ||
28 | * | ||
29 | * We need to make sure that this is THREAD_SIZE aligned due to the | ||
30 | * way process stacks are handled. This is done by having a special | ||
31 | * "init_task" linker map entry.. | ||
32 | */ | ||
33 | union thread_union init_thread_union __init_task_data = { | ||
34 | INIT_THREAD_INFO(init_task) | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * Initial task structure. | ||
39 | * | ||
40 | * All other task structs will be allocated on slabs in fork.c | ||
41 | */ | ||
42 | struct task_struct init_task = INIT_TASK(init_task); | ||
43 | EXPORT_SYMBOL(init_task); | ||
44 | |||
45 | /* | ||
46 | * per-CPU stack and boot info. | ||
47 | */ | ||
48 | DEFINE_PER_CPU(unsigned long, boot_sp) = | ||
49 | (unsigned long)init_stack + THREAD_SIZE; | ||
50 | |||
51 | #ifdef CONFIG_SMP | ||
52 | DEFINE_PER_CPU(unsigned long, boot_pc) = (unsigned long)start_kernel; | ||
53 | #else | ||
54 | /* | ||
55 | * The variable must be __initdata since it references __init code. | ||
56 | * With CONFIG_SMP it is per-cpu data, which is exempt from validation. | ||
57 | */ | ||
58 | unsigned long __initdata boot_pc = (unsigned long)start_kernel; | ||
59 | #endif | ||
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S new file mode 100644 index 000000000000..207271f0cce1 --- /dev/null +++ b/arch/tile/kernel/intvec_32.S | |||
@@ -0,0 +1,2006 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Linux interrupt vectors. | ||
15 | */ | ||
16 | |||
17 | #include <linux/linkage.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <asm/ptrace.h> | ||
21 | #include <asm/thread_info.h> | ||
22 | #include <asm/unistd.h> | ||
23 | #include <asm/irqflags.h> | ||
24 | #include <asm/atomic.h> | ||
25 | #include <asm/asm-offsets.h> | ||
26 | #include <hv/hypervisor.h> | ||
27 | #include <arch/abi.h> | ||
28 | #include <arch/interrupts.h> | ||
29 | #include <arch/spr_def.h> | ||
30 | |||
31 | #ifdef CONFIG_PREEMPT | ||
32 | # error "No support for kernel preemption currently" | ||
33 | #endif | ||
34 | |||
35 | #if INT_INTCTRL_1 < 32 || INT_INTCTRL_1 >= 48 | ||
36 | # error INT_INTCTRL_1 coded to set high interrupt mask | ||
37 | #endif | ||
38 | |||
39 | #define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg) | ||
40 | |||
41 | #define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR) | ||
42 | |||
43 | #if !CHIP_HAS_WH64() | ||
44 | /* By making this an empty macro, we can use wh64 in the code. */ | ||
45 | .macro wh64 reg | ||
46 | .endm | ||
47 | #endif | ||
48 | |||
49 | .macro push_reg reg, ptr=sp, delta=-4 | ||
50 | { | ||
51 | sw \ptr, \reg | ||
52 | addli \ptr, \ptr, \delta | ||
53 | } | ||
54 | .endm | ||
55 | |||
56 | .macro pop_reg reg, ptr=sp, delta=4 | ||
57 | { | ||
58 | lw \reg, \ptr | ||
59 | addli \ptr, \ptr, \delta | ||
60 | } | ||
61 | .endm | ||
62 | |||
63 | .macro pop_reg_zero reg, zreg, ptr=sp, delta=4 | ||
64 | { | ||
65 | move \zreg, zero | ||
66 | lw \reg, \ptr | ||
67 | addi \ptr, \ptr, \delta | ||
68 | } | ||
69 | .endm | ||
70 | |||
71 | .macro push_extra_callee_saves reg | ||
72 | PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51)) | ||
73 | push_reg r51, \reg | ||
74 | push_reg r50, \reg | ||
75 | push_reg r49, \reg | ||
76 | push_reg r48, \reg | ||
77 | push_reg r47, \reg | ||
78 | push_reg r46, \reg | ||
79 | push_reg r45, \reg | ||
80 | push_reg r44, \reg | ||
81 | push_reg r43, \reg | ||
82 | push_reg r42, \reg | ||
83 | push_reg r41, \reg | ||
84 | push_reg r40, \reg | ||
85 | push_reg r39, \reg | ||
86 | push_reg r38, \reg | ||
87 | push_reg r37, \reg | ||
88 | push_reg r36, \reg | ||
89 | push_reg r35, \reg | ||
90 | push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34) | ||
91 | .endm | ||
92 | |||
93 | .macro panic str | ||
94 | .pushsection .rodata, "a" | ||
95 | 1: | ||
96 | .asciz "\str" | ||
97 | .popsection | ||
98 | { | ||
99 | moveli r0, lo16(1b) | ||
100 | } | ||
101 | { | ||
102 | auli r0, r0, ha16(1b) | ||
103 | jal panic | ||
104 | } | ||
105 | .endm | ||
106 | |||
107 | #ifdef __COLLECT_LINKER_FEEDBACK__ | ||
108 | .pushsection .text.intvec_feedback,"ax" | ||
109 | intvec_feedback: | ||
110 | .popsection | ||
111 | #endif | ||
112 | |||
113 | /* | ||
114 | * Default interrupt handler. | ||
115 | * | ||
116 | * vecnum is where we'll put this code. | ||
117 | * c_routine is the C routine we'll call. | ||
118 | * | ||
119 | * The C routine is passed two arguments: | ||
120 | * - A pointer to the pt_regs state. | ||
121 | * - The interrupt vector number. | ||
122 | * | ||
123 | * The "processing" argument specifies the code for processing | ||
124 | * the interrupt. Defaults to "handle_interrupt". | ||
125 | */ | ||
126 | .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt | ||
127 | .org (\vecnum << 8) | ||
128 | intvec_\vecname: | ||
129 | .ifc \vecnum, INT_SWINT_1 | ||
130 | blz TREG_SYSCALL_NR_NAME, sys_cmpxchg | ||
131 | .endif | ||
132 | |||
133 | /* Temporarily save a register so we have somewhere to work. */ | ||
134 | |||
135 | mtspr SYSTEM_SAVE_1_1, r0 | ||
136 | mfspr r0, EX_CONTEXT_1_1 | ||
137 | |||
138 | /* The cmpxchg code clears sp to force us to reset it here on fault. */ | ||
139 | { | ||
140 | bz sp, 2f | ||
141 | andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
142 | } | ||
143 | |||
144 | .ifc \vecnum, INT_DOUBLE_FAULT | ||
145 | /* | ||
146 | * For double-faults from user-space, fall through to the normal | ||
147 | * register save and stack setup path. Otherwise, it's the | ||
148 | * hypervisor giving us one last chance to dump diagnostics, and we | ||
149 | * branch to the kernel_double_fault routine to do so. | ||
150 | */ | ||
151 | bz r0, 1f | ||
152 | j _kernel_double_fault | ||
153 | 1: | ||
154 | .else | ||
155 | /* | ||
156 | * If we're coming from user-space, then set sp to the top of | ||
157 | * the kernel stack. Otherwise, assume sp is already valid. | ||
158 | */ | ||
159 | { | ||
160 | bnz r0, 0f | ||
161 | move r0, sp | ||
162 | } | ||
163 | .endif | ||
164 | |||
165 | .ifc \c_routine, do_page_fault | ||
166 | /* | ||
167 | * The page_fault handler may be downcalled directly by the | ||
168 | * hypervisor even when Linux is running and has ICS set. | ||
169 | * | ||
170 | * In this case the contents of EX_CONTEXT_1_1 reflect the | ||
171 | * previous fault and can't be relied on to choose whether or | ||
172 | * not to reinitialize the stack pointer. So we add a test | ||
173 | * to see whether SYSTEM_SAVE_1_2 has the high bit set, | ||
174 | * and if so we don't reinitialize sp, since we must be coming | ||
175 | * from Linux. (In fact the precise case is !(val & ~1), | ||
176 | * but any Linux PC has to have the high bit set.) | ||
177 | * | ||
178 | * Note that the hypervisor *always* sets SYSTEM_SAVE_1_2 for | ||
179 | * any path that turns into a downcall to one of our TLB handlers. | ||
180 | */ | ||
181 | mfspr r0, SYSTEM_SAVE_1_2 | ||
182 | { | ||
183 | blz r0, 0f /* high bit in S_S_1_2 is for a PC to use */ | ||
184 | move r0, sp | ||
185 | } | ||
186 | .endif | ||
187 | |||
188 | 2: | ||
189 | /* | ||
190 | * SYSTEM_SAVE_1_0 holds the cpu number in the low bits, and | ||
191 | * the current stack top in the higher bits. So we recover | ||
192 | * our stack top by just masking off the low bits, then | ||
193 | * point sp at the top aligned address on the actual stack page. | ||
194 | */ | ||
195 | mfspr r0, SYSTEM_SAVE_1_0 | ||
196 | mm r0, r0, zero, LOG2_THREAD_SIZE, 31 | ||
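| /* (That is, r0 = SYSTEM_SAVE_1_0 & -THREAD_SIZE, the kernel stack top.) */ | ||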
197 | |||
198 | 0: | ||
199 | /* | ||
200 | * Align the stack mod 64 so we can properly predict what | ||
201 | * cache lines we need to write-hint to reduce memory fetch | ||
202 | * latency as we enter the kernel. The layout of memory is | ||
203 | * as follows, with cache line 0 at the lowest VA, and cache | ||
204 | * line 4 just below the r0 value this "andi" computes. | ||
205 | * Note that we never write to cache line 4, and we skip | ||
206 | * cache line 1 for syscalls. | ||
207 | * | ||
208 | * cache line 4: ptregs padding (two words) | ||
209 | * cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad | ||
210 | * cache line 2: r30...r45 | ||
211 | * cache line 1: r14...r29 | ||
212 | * cache line 0: 2 x frame, r0..r13 | ||
213 | */ | ||
214 | andi r0, r0, -64 | ||
215 | |||
216 | /* | ||
217 | * Push the first four registers on the stack, so that we can set | ||
218 | * them to vector-unique values before we jump to the common code. | ||
219 | * | ||
220 | * Registers are pushed on the stack as a struct pt_regs, | ||
221 | * with the sp initially just above the struct, and when we're | ||
222 | * done, sp points to the base of the struct, minus | ||
223 | * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code. | ||
224 | * | ||
225 | * This routine saves just the first four registers, plus the | ||
226 | * stack context so we can do proper backtracing right away, | ||
227 | * and defers to handle_interrupt to save the rest. | ||
228 | * The backtracer needs pc, ex1, lr, sp, r52, and faultnum. | ||
229 | */ | ||
230 | addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP) | ||
231 | wh64 r0 /* cache line 3 */ | ||
232 | { | ||
233 | sw r0, lr | ||
234 | addli r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR | ||
235 | } | ||
236 | { | ||
237 | sw r0, sp | ||
238 | addli sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP | ||
239 | } | ||
240 | { | ||
241 | sw sp, r52 | ||
242 | addli sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52) | ||
243 | } | ||
244 | wh64 sp /* cache line 0 */ | ||
245 | { | ||
246 | sw sp, r1 | ||
247 | addli sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1) | ||
248 | } | ||
249 | { | ||
250 | sw sp, r2 | ||
251 | addli sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2) | ||
252 | } | ||
253 | { | ||
254 | sw sp, r3 | ||
255 | addli sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3) | ||
256 | } | ||
257 | mfspr r0, EX_CONTEXT_1_0 | ||
258 | .ifc \processing,handle_syscall | ||
259 | /* | ||
260 | * Bump the saved PC by one bundle so that when we return, we won't | ||
261 | * execute the same swint instruction again. We need to do this while | ||
262 | * we're in the critical section. | ||
263 | */ | ||
264 | addi r0, r0, 8 | ||
265 | .endif | ||
266 | { | ||
267 | sw sp, r0 | ||
268 | addli sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC | ||
269 | } | ||
270 | mfspr r0, EX_CONTEXT_1_1 | ||
271 | { | ||
272 | sw sp, r0 | ||
273 | addi sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1 | ||
274 | /* | ||
275 | * Use r0 for syscalls so it's a temporary; use r1 for interrupts | ||
276 | * so that it gets passed through unchanged to the handler routine. | ||
277 | * Note that the .if conditional confusingly spans bundles. | ||
278 | */ | ||
279 | .ifc \processing,handle_syscall | ||
280 | movei r0, \vecnum | ||
281 | } | ||
282 | { | ||
283 | sw sp, r0 | ||
284 | .else | ||
285 | movei r1, \vecnum | ||
286 | } | ||
287 | { | ||
288 | sw sp, r1 | ||
289 | .endif | ||
290 | addli sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM | ||
291 | } | ||
292 | mfspr r0, SYSTEM_SAVE_1_1 /* Original r0 */ | ||
293 | { | ||
294 | sw sp, r0 | ||
295 | addi sp, sp, -PTREGS_OFFSET_REG(0) - 4 | ||
296 | } | ||
297 | { | ||
298 | sw sp, zero /* write zero into "Next SP" frame pointer */ | ||
299 | addi sp, sp, -4 /* leave SP pointing at bottom of frame */ | ||
300 | } | ||
301 | .ifc \processing,handle_syscall | ||
302 | j handle_syscall | ||
303 | .else | ||
304 | /* | ||
305 | * Capture per-interrupt SPR context to registers. | ||
306 | * We overload the meaning of r3 on this path such that if its bit 31 | ||
307 | * is set, we have to mask all interrupts including NMIs before | ||
308 | * clearing the interrupt critical section bit. | ||
309 | * See discussion below at "finish_interrupt_save". | ||
310 | */ | ||
311 | .ifc \c_routine, do_page_fault | ||
312 | mfspr r2, SYSTEM_SAVE_1_3 /* address of page fault */ | ||
313 | mfspr r3, SYSTEM_SAVE_1_2 /* info about page fault */ | ||
314 | .else | ||
315 | .ifc \vecnum, INT_DOUBLE_FAULT | ||
316 | { | ||
317 | mfspr r2, SYSTEM_SAVE_1_2 /* double fault info from HV */ | ||
318 | movei r3, 0 | ||
319 | } | ||
320 | .else | ||
321 | .ifc \c_routine, do_trap | ||
322 | { | ||
323 | mfspr r2, GPV_REASON | ||
324 | movei r3, 0 | ||
325 | } | ||
326 | .else | ||
327 | .ifc \c_routine, op_handle_perf_interrupt | ||
328 | { | ||
329 | mfspr r2, PERF_COUNT_STS | ||
330 | movei r3, -1 /* not used, but set for consistency */ | ||
331 | } | ||
332 | .else | ||
333 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
334 | .ifc \c_routine, op_handle_aux_perf_interrupt | ||
335 | { | ||
336 | mfspr r2, AUX_PERF_COUNT_STS | ||
337 | movei r3, -1 /* not used, but set for consistency */ | ||
338 | } | ||
339 | .else | ||
340 | #endif | ||
341 | movei r3, 0 | ||
342 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
343 | .endif | ||
344 | #endif | ||
345 | .endif | ||
346 | .endif | ||
347 | .endif | ||
348 | .endif | ||
349 | /* Put function pointer in r0 */ | ||
350 | moveli r0, lo16(\c_routine) | ||
351 | { | ||
352 | auli r0, r0, ha16(\c_routine) | ||
353 | j \processing | ||
354 | } | ||
355 | .endif | ||
356 | ENDPROC(intvec_\vecname) | ||
357 | |||
358 | #ifdef __COLLECT_LINKER_FEEDBACK__ | ||
359 | .pushsection .text.intvec_feedback,"ax" | ||
360 | .org (\vecnum << 5) | ||
361 | FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8) | ||
362 | jrp lr | ||
363 | .popsection | ||
364 | #endif | ||
365 | |||
366 | .endm | ||
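| /* | ||
| * int_hand is instantiated once per vector later in this file; an | ||
| * illustrative use (names approximate) would be | ||
| * | ||
| *	int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault | ||
| * | ||
| * which places the vector code at offset (vecnum << 8) and dispatches | ||
| * to do_page_fault via the default "handle_interrupt" processing. | ||
| */ | ||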
367 | |||
368 | |||
369 | /* | ||
370 | * Save the rest of the registers that we didn't save in the actual | ||
371 | * vector itself. We can't use r0-r10 inclusive here. | ||
372 | */ | ||
373 | .macro finish_interrupt_save, function | ||
374 | |||
375 | /* If it's a syscall, save a proper orig_r0, otherwise just zero. */ | ||
376 | PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0) | ||
377 | { | ||
378 | .ifc \function,handle_syscall | ||
379 | sw r52, r0 | ||
380 | .else | ||
381 | sw r52, zero | ||
382 | .endif | ||
383 | PTREGS_PTR(r52, PTREGS_OFFSET_TP) | ||
384 | } | ||
385 | |||
386 | /* | ||
387 | * For ordinary syscalls, we save neither caller- nor callee- | ||
388 | * save registers, since the syscall invoker doesn't expect the | ||
389 | * caller-saves to be saved, and the called kernel functions will | ||
390 | * take care of saving the callee-saves for us. | ||
391 | * | ||
392 | * For interrupts we save just the caller-save registers. Saving | ||
393 | * them is required (since the "caller" can't save them). Again, | ||
394 | * the called kernel functions will restore the callee-save | ||
395 | * registers for us appropriately. | ||
396 | * | ||
397 | * On return, we normally restore nothing special for syscalls, | ||
398 | * and just the caller-save registers for interrupts. | ||
399 | * | ||
400 | * However, there are some important caveats to all this: | ||
401 | * | ||
402 | * - We always save a few callee-save registers to give us | ||
403 | * some scratchpad registers to carry across function calls. | ||
404 | * | ||
405 | * - fork/vfork/etc require us to save all the callee-save | ||
406 | * registers, which we do in PTREGS_SYSCALL_ALL_REGS, below. | ||
407 | * | ||
408 | * - We always save r0..r5 and r10 for syscalls, since we need | ||
409 | * to reload them a bit later for the actual kernel call, and | ||
410 | * since we might need them for -ERESTARTNOINTR, etc. | ||
411 | * | ||
412 | * - Before invoking a signal handler, we save the unsaved | ||
413 | * callee-save registers so they are visible to the | ||
414 | * signal handler or any ptracer. | ||
415 | * | ||
416 | * - If the unsaved callee-save registers are modified, we set | ||
417 | * a bit in pt_regs so we know to reload them from pt_regs | ||
418 | * and not just rely on the kernel function unwinding. | ||
419 | * (Done for ptrace register writes and SA_SIGINFO handler.) | ||
420 | */ | ||
421 | { | ||
422 | sw r52, tp | ||
423 | PTREGS_PTR(r52, PTREGS_OFFSET_REG(33)) | ||
424 | } | ||
425 | wh64 r52 /* cache line 2 */ | ||
426 | push_reg r33, r52 | ||
427 | push_reg r32, r52 | ||
428 | push_reg r31, r52 | ||
429 | .ifc \function,handle_syscall | ||
430 | push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30) | ||
431 | push_reg TREG_SYSCALL_NR_NAME, r52, \ | ||
432 | PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL | ||
433 | .else | ||
434 | |||
435 | push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30) | ||
436 | wh64 r52 /* cache line 1 */ | ||
437 | push_reg r29, r52 | ||
438 | push_reg r28, r52 | ||
439 | push_reg r27, r52 | ||
440 | push_reg r26, r52 | ||
441 | push_reg r25, r52 | ||
442 | push_reg r24, r52 | ||
443 | push_reg r23, r52 | ||
444 | push_reg r22, r52 | ||
445 | push_reg r21, r52 | ||
446 | push_reg r20, r52 | ||
447 | push_reg r19, r52 | ||
448 | push_reg r18, r52 | ||
449 | push_reg r17, r52 | ||
450 | push_reg r16, r52 | ||
451 | push_reg r15, r52 | ||
452 | push_reg r14, r52 | ||
453 | push_reg r13, r52 | ||
454 | push_reg r12, r52 | ||
455 | push_reg r11, r52 | ||
456 | push_reg r10, r52 | ||
457 | push_reg r9, r52 | ||
458 | push_reg r8, r52 | ||
459 | push_reg r7, r52 | ||
460 | push_reg r6, r52 | ||
461 | |||
462 | .endif | ||
463 | |||
464 | push_reg r5, r52 | ||
465 | sw r52, r4 | ||
466 | |||
467 | /* Load tp with our per-cpu offset. */ | ||
468 | #ifdef CONFIG_SMP | ||
469 | { | ||
470 | mfspr r20, SYSTEM_SAVE_1_0 | ||
471 | moveli r21, lo16(__per_cpu_offset) | ||
472 | } | ||
473 | { | ||
474 | auli r21, r21, ha16(__per_cpu_offset) | ||
475 | mm r20, r20, zero, 0, LOG2_THREAD_SIZE-1 | ||
476 | } | ||
477 | s2a r20, r20, r21 | ||
478 | lw tp, r20 | ||
479 | #else | ||
480 | move tp, zero | ||
481 | #endif | ||
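| /* | ||
| * In C terms the SMP path above is roughly | ||
| *	tp = __per_cpu_offset[SYSTEM_SAVE_1_0 & (THREAD_SIZE - 1)]; | ||
| * using the cpu number stashed in the low bits of SYSTEM_SAVE_1_0. | ||
| */ | ||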
482 | |||
483 | /* | ||
484 | * If we will be returning to the kernel, we will need to | ||
485 | * reset the interrupt masks to the state they had before. | ||
486 | * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled. | ||
487 | * We load flags in r32 here so we can jump to .Lrestore_regs | ||
488 | * directly after do_page_fault_ics() if necessary. | ||
489 | */ | ||
490 | mfspr r32, EX_CONTEXT_1_1 | ||
491 | { | ||
492 | andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
493 | PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS) | ||
494 | } | ||
495 | bzt r32, 1f /* zero if from user space */ | ||
496 | IRQS_DISABLED(r32) /* zero if irqs enabled */ | ||
497 | #if PT_FLAGS_DISABLE_IRQ != 1 | ||
498 | # error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix | ||
499 | #endif | ||
500 | 1: | ||
501 | .ifnc \function,handle_syscall | ||
502 | /* Record the fact that we saved the caller-save registers above. */ | ||
503 | ori r32, r32, PT_FLAGS_CALLER_SAVES | ||
504 | .endif | ||
505 | sw r21, r32 | ||
506 | |||
507 | #ifdef __COLLECT_LINKER_FEEDBACK__ | ||
508 | /* | ||
509 | * Notify the feedback routines that we were in the | ||
510 | * appropriate fixed interrupt vector area. Note that we | ||
511 | * still have ICS set at this point, so we can't invoke any | ||
512 | * atomic operations or we will panic. The feedback | ||
513 | * routines internally preserve r0..r10 and r30 up. | ||
514 | */ | ||
515 | .ifnc \function,handle_syscall | ||
516 | shli r20, r1, 5 | ||
517 | .else | ||
518 | moveli r20, INT_SWINT_1 << 5 | ||
519 | .endif | ||
520 | addli r20, r20, lo16(intvec_feedback) | ||
521 | auli r20, r20, ha16(intvec_feedback) | ||
522 | jalr r20 | ||
523 | |||
524 | /* And now notify the feedback routines that we are here. */ | ||
525 | FEEDBACK_ENTER(\function) | ||
526 | #endif | ||
527 | |||
528 | /* | ||
529 | * We've captured enough state on the stack (including in | ||
530 | * particular our EX_CONTEXT state) that we can now release | ||
531 | * the interrupt critical section and replace it with our | ||
532 | * standard "interrupts disabled" mask value. This allows | ||
533 | * synchronous interrupts (and profile interrupts) to punch | ||
534 | * through from this point onwards. | ||
535 | * | ||
536 | * If bit 31 of r3 is set during a non-NMI interrupt, we know we | ||
537 | * are on the path where the hypervisor has punched through our | ||
538 | * ICS with a page fault, so we call out to do_page_fault_ics() | ||
539 | * to figure out what to do with it. If the fault was in | ||
540 | * an atomic op, we unlock the atomic lock, adjust the | ||
541 | * saved register state a little, and return "zero" in r4, | ||
542 | * falling through into the normal page-fault interrupt code. | ||
543 | * If the fault was in a kernel-space atomic operation, then | ||
544 | * do_page_fault_ics() resolves it itself, returns "one" in r4, | ||
545 | * and as a result goes directly to restoring registers and iret, | ||
546 | * without trying to adjust the interrupt masks at all. | ||
547 | * The do_page_fault_ics() API involves passing and returning | ||
548 | * a five-word struct (in registers) to avoid writing the | ||
549 | * save and restore code here. | ||
550 | */ | ||
551 | .ifc \function,handle_nmi | ||
552 | IRQ_DISABLE_ALL(r20) | ||
553 | .else | ||
554 | .ifnc \function,handle_syscall | ||
555 | bgezt r3, 1f | ||
556 | { | ||
557 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
558 | jal do_page_fault_ics | ||
559 | } | ||
560 | FEEDBACK_REENTER(\function) | ||
561 | bzt r4, 1f | ||
562 | j .Lrestore_regs | ||
563 | 1: | ||
564 | .endif | ||
565 | IRQ_DISABLE(r20, r21) | ||
566 | .endif | ||
567 | mtspr INTERRUPT_CRITICAL_SECTION, zero | ||
568 | |||
569 | #if CHIP_HAS_WH64() | ||
570 | /* | ||
571 | * Prepare the first 256 stack bytes to be rapidly accessible | ||
572 | * without having to fetch the background data. We don't really | ||
573 | * know how far to write-hint, but kernel stacks generally | ||
574 | * aren't that big, and write-hinting here does take some time. | ||
575 | */ | ||
576 | addi r52, sp, -64 | ||
577 | { | ||
578 | wh64 r52 | ||
579 | addi r52, r52, -64 | ||
580 | } | ||
581 | { | ||
582 | wh64 r52 | ||
583 | addi r52, r52, -64 | ||
584 | } | ||
585 | { | ||
586 | wh64 r52 | ||
587 | addi r52, r52, -64 | ||
588 | } | ||
589 | wh64 r52 | ||
590 | #endif | ||
591 | |||
592 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
593 | .ifnc \function,handle_nmi | ||
594 | /* | ||
595 | * We finally have enough state set up to notify the irq | ||
596 | * tracing code that irqs were disabled on entry to the handler. | ||
597 | * The TRACE_IRQS_OFF call clobbers registers r0-r29. | ||
598 | * For syscalls, we already have the register state saved away | ||
599 | * on the stack, so we don't bother to do any register saves here, | ||
600 | * and later we pop the registers back off the kernel stack. | ||
601 | * For interrupt handlers, save r0-r3 in callee-saved registers. | ||
602 | */ | ||
603 | .ifnc \function,handle_syscall | ||
604 | { move r30, r0; move r31, r1 } | ||
605 | { move r32, r2; move r33, r3 } | ||
606 | .endif | ||
607 | TRACE_IRQS_OFF | ||
608 | .ifnc \function,handle_syscall | ||
609 | { move r0, r30; move r1, r31 } | ||
610 | { move r2, r32; move r3, r33 } | ||
611 | .endif | ||
612 | .endif | ||
613 | #endif | ||
614 | |||
615 | .endm | ||
616 | |||
617 | .macro check_single_stepping, kind, not_single_stepping | ||
618 | /* | ||
619 | * Check whether we are single-stepping at user-level privilege. | ||
620 | * \kind can be "normal", "ill", or "syscall". | ||
621 | * On fall-through (i.e. we are single-stepping), the following are set: | ||
622 | * r29: thread_info->step_state | ||
623 | * r28: &pt_regs->pc | ||
624 | * r27: pt_regs->pc | ||
625 | * r26: thread_info->step_state->buffer | ||
626 | */ | ||
627 | |||
628 | /* Check for single stepping */ | ||
629 | GET_THREAD_INFO(r29) | ||
630 | { | ||
631 | /* Get pointer to field holding step state */ | ||
632 | addi r29, r29, THREAD_INFO_STEP_STATE_OFFSET | ||
633 | |||
634 | /* Get pointer to EX1 in register state */ | ||
635 | PTREGS_PTR(r27, PTREGS_OFFSET_EX1) | ||
636 | } | ||
637 | { | ||
638 | /* Get pointer to field holding PC */ | ||
639 | PTREGS_PTR(r28, PTREGS_OFFSET_PC) | ||
640 | |||
641 | /* Load the pointer to the step state */ | ||
642 | lw r29, r29 | ||
643 | } | ||
644 | /* Load EX1 */ | ||
645 | lw r27, r27 | ||
646 | { | ||
647 | /* Points to flags */ | ||
648 | addi r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET | ||
649 | |||
650 | /* No single stepping if there is no step state structure */ | ||
651 | bzt r29, \not_single_stepping | ||
652 | } | ||
653 | { | ||
654 | /* mask off ICS and any other high bits */ | ||
655 | andi r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK | ||
656 | |||
657 | /* Load pointer to single step instruction buffer */ | ||
658 | lw r26, r29 | ||
659 | } | ||
660 | /* Check priv state */ | ||
661 | bnz r27, \not_single_stepping | ||
662 | |||
663 | /* Get flags */ | ||
664 | lw r22, r23 | ||
665 | { | ||
666 | /* Branch if single-step mode not enabled */ | ||
667 | bbnst r22, \not_single_stepping | ||
668 | |||
669 | /* Clear enabled flag */ | ||
670 | andi r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED | ||
671 | } | ||
672 | .ifc \kind,normal | ||
673 | { | ||
674 | /* Load PC */ | ||
675 | lw r27, r28 | ||
676 | |||
677 | /* Point to the entry containing the original PC */ | ||
678 | addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET | ||
679 | } | ||
680 | { | ||
681 | /* Disable single stepping flag */ | ||
682 | sw r23, r22 | ||
683 | } | ||
684 | { | ||
685 | /* Get the original pc */ | ||
686 | lw r24, r24 | ||
687 | |||
688 | /* See if the PC is at the start of the single step buffer */ | ||
689 | seq r25, r26, r27 | ||
690 | } | ||
691 | /* | ||
692 | * NOTE: it is really expected that the PC be in the single step buffer | ||
693 | * at this point | ||
694 | */ | ||
695 | bzt r25, \not_single_stepping | ||
696 | |||
697 | /* Restore the original PC */ | ||
698 | sw r28, r24 | ||
699 | .else | ||
700 | .ifc \kind,syscall | ||
701 | { | ||
702 | /* Load PC */ | ||
703 | lw r27, r28 | ||
704 | |||
705 | /* Point to the entry containing the next PC */ | ||
706 | addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET | ||
707 | } | ||
708 | { | ||
709 | /* Increment the stopped PC by the bundle size */ | ||
710 | addi r26, r26, 8 | ||
711 | |||
712 | /* Disable single stepping flag */ | ||
713 | sw r23, r22 | ||
714 | } | ||
715 | { | ||
716 | /* Get the next pc */ | ||
717 | lw r24, r24 | ||
718 | |||
719 | /* | ||
720 | * See if the PC is one bundle past the start of the | ||
721 | * single step buffer | ||
722 | */ | ||
723 | seq r25, r26, r27 | ||
724 | } | ||
725 | { | ||
726 | /* | ||
727 | * NOTE: it is really expected that the PC be in the | ||
728 | * single step buffer at this point | ||
729 | */ | ||
730 | bzt r25, \not_single_stepping | ||
731 | } | ||
732 | /* Set to the next PC */ | ||
733 | sw r28, r24 | ||
734 | .else | ||
735 | { | ||
736 | /* Point to 3rd bundle in buffer */ | ||
737 | addi r25, r26, 16 | ||
738 | |||
739 | /* Load PC */ | ||
740 | lw r27, r28 | ||
741 | } | ||
742 | { | ||
743 | /* Disable single stepping flag */ | ||
744 | sw r23, r22 | ||
745 | |||
746 | /* See if the PC is in the single step buffer */ | ||
747 | slte_u r24, r26, r27 | ||
748 | } | ||
749 | { | ||
750 | slte_u r25, r27, r25 | ||
751 | |||
752 | /* | ||
753 | * NOTE: it is really expected that the PC be in the | ||
754 | * single step buffer at this point | ||
755 | */ | ||
756 | bzt r24, \not_single_stepping | ||
757 | } | ||
758 | bzt r25, \not_single_stepping | ||
759 | .endif | ||
760 | .endif | ||
761 | .endm | ||
762 | |||
763 | /* | ||
764 | * Redispatch a downcall. | ||
765 | */ | ||
766 | .macro dc_dispatch vecnum, vecname | ||
767 | .org (\vecnum << 8) | ||
768 | intvec_\vecname: | ||
769 | j hv_downcall_dispatch | ||
770 | ENDPROC(intvec_\vecname) | ||
771 | .endm | ||
772 | |||
773 | /* | ||
774 | * Common code for most interrupts. The C function we're eventually | ||
775 | * going to is in r0, and the faultnum is in r1; the original | ||
776 | * values for those registers are on the stack. | ||
777 | */ | ||
778 | .pushsection .text.handle_interrupt,"ax" | ||
779 | handle_interrupt: | ||
780 | finish_interrupt_save handle_interrupt | ||
781 | |||
782 | /* | ||
783 | * Check whether we are single-stepping in user level. If so, we | ||
784 | * need to restore the PC. | ||
785 | */ | ||
786 | |||
787 | check_single_stepping normal, .Ldispatch_interrupt | ||
788 | .Ldispatch_interrupt: | ||
789 | |||
790 | /* Jump to the C routine; it should enable irqs as soon as possible. */ | ||
791 | { | ||
792 | jalr r0 | ||
793 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
794 | } | ||
795 | FEEDBACK_REENTER(handle_interrupt) | ||
796 | { | ||
797 | movei r30, 0 /* not an NMI */ | ||
798 | j interrupt_return | ||
799 | } | ||
800 | STD_ENDPROC(handle_interrupt) | ||
801 | |||
802 | /* | ||
803 | * This routine takes a boolean in r30 indicating if this is an NMI. | ||
804 | * If so, we also expect a boolean in r31 indicating whether to | ||
805 | * re-enable the oprofile interrupts. | ||
806 | */ | ||
807 | STD_ENTRY(interrupt_return) | ||
808 | /* If we're resuming to kernel space, don't check thread flags. */ | ||
809 | { | ||
810 | bnz r30, .Lrestore_all /* NMIs don't special-case user-space */ | ||
811 | PTREGS_PTR(r29, PTREGS_OFFSET_EX1) | ||
812 | } | ||
813 | lw r29, r29 | ||
814 | andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
815 | { | ||
816 | bzt r29, .Lresume_userspace | ||
817 | PTREGS_PTR(r29, PTREGS_OFFSET_PC) | ||
818 | } | ||
819 | |||
820 | /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */ | ||
821 | { | ||
822 | lw r28, r29 | ||
823 | moveli r27, lo16(_cpu_idle_nap) | ||
824 | } | ||
825 | { | ||
826 | auli r27, r27, ha16(_cpu_idle_nap) | ||
827 | } | ||
828 | { | ||
829 | seq r27, r27, r28 | ||
830 | } | ||
831 | { | ||
832 | bbns r27, .Lrestore_all | ||
833 | addi r28, r28, 8 | ||
834 | } | ||
835 | sw r29, r28 | ||
836 | j .Lrestore_all | ||
837 | |||
838 | .Lresume_userspace: | ||
839 | FEEDBACK_REENTER(interrupt_return) | ||
840 | |||
841 | /* | ||
842 | * Disable interrupts so as to make sure we don't | ||
843 | * miss an interrupt that sets any of the thread flags (like | ||
844 | * need_resched or sigpending) between sampling and the iret. | ||
845 | * Routines like schedule() or do_signal() may re-enable | ||
846 | * interrupts before returning. | ||
847 | */ | ||
848 | IRQ_DISABLE(r20, r21) | ||
849 | TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */ | ||
850 | |||
851 | /* Get base of stack in r32; note r30/31 are used as arguments here. */ | ||
852 | GET_THREAD_INFO(r32) | ||
853 | |||
854 | |||
855 | /* Check to see if there is any work to do before returning to user. */ | ||
856 | { | ||
857 | addi r29, r32, THREAD_INFO_FLAGS_OFFSET | ||
858 | moveli r28, lo16(_TIF_ALLWORK_MASK) | ||
859 | } | ||
860 | { | ||
861 | lw r29, r29 | ||
862 | auli r28, r28, ha16(_TIF_ALLWORK_MASK) | ||
863 | } | ||
864 | and r28, r29, r28 | ||
865 | bnz r28, .Lwork_pending | ||
866 | |||
867 | /* | ||
868 | * In the NMI case we | ||
869 | * omit the call to single_process_check_nohz, which normally checks | ||
870 | * to see if we should start or stop the scheduler tick, because | ||
871 | * we can't call arbitrary Linux code from an NMI context. | ||
872 | * We always call the homecache TLB deferral code to re-trigger | ||
873 | * the deferral mechanism. | ||
874 | * | ||
875 | * The other chunk of responsibility this code has is to reset the | ||
876 | * interrupt masks appropriately to reset irqs and NMIs. We have | ||
877 | * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the | ||
878 | * lockdep-type stuff, but we can't set ICS until afterwards, since | ||
879 | * ICS can only be used in very tight chunks of code to avoid | ||
880 | * tripping over various assertions that it is off. | ||
881 | * | ||
882 | * (There is what looks like a window of vulnerability here since | ||
883 | * we might take a profile interrupt between the two SPR writes | ||
884 | * that set the mask, but since we write the low SPR word first, | ||
885 | * and our interrupt entry code checks the low SPR word, any | ||
886 | * profile interrupt will actually disable interrupts in both SPRs | ||
887 | * before returning, which is OK.) | ||
888 | */ | ||
889 | .Lrestore_all: | ||
890 | PTREGS_PTR(r0, PTREGS_OFFSET_EX1) | ||
891 | { | ||
892 | lw r0, r0 | ||
893 | PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS) | ||
894 | } | ||
895 | { | ||
896 | andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK | ||
897 | lw r32, r32 | ||
898 | } | ||
899 | bnz r0, 1f | ||
900 | j 2f | ||
901 | #if PT_FLAGS_DISABLE_IRQ != 1 | ||
902 | # error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below | ||
903 | #endif | ||
904 | 1: bbnst r32, 2f | ||
905 | IRQ_DISABLE(r20,r21) | ||
906 | TRACE_IRQS_OFF | ||
907 | movei r0, 1 | ||
908 | mtspr INTERRUPT_CRITICAL_SECTION, r0 | ||
909 | bzt r30, .Lrestore_regs | ||
910 | j 3f | ||
911 | 2: TRACE_IRQS_ON | ||
912 | movei r0, 1 | ||
913 | mtspr INTERRUPT_CRITICAL_SECTION, r0 | ||
914 | IRQ_ENABLE(r20, r21) | ||
915 | bzt r30, .Lrestore_regs | ||
916 | 3: | ||
917 | |||
918 | |||
919 | /* | ||
920 | * We now commit to returning from this interrupt, since we will be | ||
921 | * doing things like setting EX_CONTEXT SPRs and unwinding the stack | ||
922 | * frame. No calls should be made to any other code after this point. | ||
923 | * This code should only be entered with ICS set. | ||
924 | * r32 must still be set to ptregs.flags. | ||
925 | * We launch loads to each cache line separately first, so we can | ||
926 | * get some parallelism out of the memory subsystem. | ||
927 | * We start zeroing caller-saved registers throughout, since | ||
928 | * that will save some cycles if this turns out to be a syscall. | ||
929 | */ | ||
930 | .Lrestore_regs: | ||
931 | FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */ | ||
932 | |||
933 | /* | ||
934 | * Rotate so we have one high bit and one low bit to test. | ||
935 | * - low bit says whether to restore all the callee-saved registers, | ||
936 | * or just r30-r33, and r52 up. | ||
937 | * - high bit (i.e. sign bit) says whether to restore all the | ||
938 | * caller-saved registers, or just r0. | ||
939 | */ | ||
940 | #if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4 | ||
941 | # error Rotate trick does not work :-) | ||
942 | #endif | ||
943 | { | ||
944 | rli r20, r32, 30 | ||
945 | PTREGS_PTR(sp, PTREGS_OFFSET_REG(0)) | ||
946 | } | ||
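| /* | ||
| * The rotate is equivalent to a rotate right by two, so | ||
| * PT_FLAGS_RESTORE_REGS (bit 2) lands in bit 0 and PT_FLAGS_CALLER_SAVES | ||
| * (bit 1) lands in the sign bit, where the "bbs" and "blzt" tests below | ||
| * can check them directly. | ||
| */ | ||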
947 | |||
948 | /* | ||
949 | * Load cache lines 0, 2, and 3 in that order, then use | ||
950 | * the last loaded value, which makes it likely that the other | ||
951 | * cache lines have also loaded, at which point we should be | ||
952 | * able to safely read all the remaining words on those cache | ||
953 | * lines without waiting for the memory subsystem. | ||
954 | */ | ||
955 | pop_reg_zero r0, r1, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0) | ||
956 | pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30) | ||
957 | pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC | ||
958 | pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1 | ||
959 | { | ||
960 | mtspr EX_CONTEXT_1_0, r21 | ||
961 | move r5, zero | ||
962 | } | ||
963 | { | ||
964 | mtspr EX_CONTEXT_1_1, lr | ||
965 | andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
966 | } | ||
967 | |||
968 | /* Restore callee-saveds that we actually use. */ | ||
969 | pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52) | ||
970 | pop_reg_zero r31, r7 | ||
971 | pop_reg_zero r32, r8 | ||
972 | pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33) | ||
973 | |||
974 | /* | ||
975 | * If we modified other callee-saveds, restore them now. | ||
976 | * This is rare, but could be via ptrace or signal handler. | ||
977 | */ | ||
978 | { | ||
979 | move r10, zero | ||
980 | bbs r20, .Lrestore_callees | ||
981 | } | ||
982 | .Lcontinue_restore_regs: | ||
983 | |||
984 | /* Check if we're returning from a syscall. */ | ||
985 | { | ||
986 | move r11, zero | ||
987 | blzt r20, 1f /* no, so go restore caller-save registers */ | ||
988 | } | ||
989 | |||
990 | /* | ||
991 | * Check if we're returning to userspace. | ||
992 | * Note that if we're not, we don't worry about zeroing everything. | ||
993 | */ | ||
994 | { | ||
995 | addli sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29) | ||
996 | bnz lr, .Lkernel_return | ||
997 | } | ||
998 | |||
999 | /* | ||
1000 | * On return from syscall, we've restored r0 from pt_regs, but we | ||
1001 | * clear the remainder of the caller-saved registers. We could | ||
1002 | * restore the syscall arguments, but there's not much point; | ||
1003 | * clearing them instead ensures user programs aren't relying on | ||
1004 | * the caller-saves, and avoids leaking kernel pointers into | ||
1005 | * userspace. | ||
1006 | */ | ||
1007 | pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR | ||
1008 | pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP | ||
1009 | { | ||
1010 | lw sp, sp | ||
1011 | move r14, zero | ||
1012 | move r15, zero | ||
1013 | } | ||
1014 | { move r16, zero; move r17, zero } | ||
1015 | { move r18, zero; move r19, zero } | ||
1016 | { move r20, zero; move r21, zero } | ||
1017 | { move r22, zero; move r23, zero } | ||
1018 | { move r24, zero; move r25, zero } | ||
1019 | { move r26, zero; move r27, zero } | ||
1020 | { move r28, zero; move r29, zero } | ||
1021 | iret | ||
1022 | |||
1023 | /* | ||
1024 | * Not a syscall, so restore caller-saved registers. | ||
1025 | * First kick off a load for cache line 1, which we're touching | ||
1026 | * for the first time here. | ||
1027 | */ | ||
1028 | .align 64 | ||
1029 | 1: pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29) | ||
1030 | pop_reg r1 | ||
1031 | pop_reg r2 | ||
1032 | pop_reg r3 | ||
1033 | pop_reg r4 | ||
1034 | pop_reg r5 | ||
1035 | pop_reg r6 | ||
1036 | pop_reg r7 | ||
1037 | pop_reg r8 | ||
1038 | pop_reg r9 | ||
1039 | pop_reg r10 | ||
1040 | pop_reg r11 | ||
1041 | pop_reg r12 | ||
1042 | pop_reg r13 | ||
1043 | pop_reg r14 | ||
1044 | pop_reg r15 | ||
1045 | pop_reg r16 | ||
1046 | pop_reg r17 | ||
1047 | pop_reg r18 | ||
1048 | pop_reg r19 | ||
1049 | pop_reg r20 | ||
1050 | pop_reg r21 | ||
1051 | pop_reg r22 | ||
1052 | pop_reg r23 | ||
1053 | pop_reg r24 | ||
1054 | pop_reg r25 | ||
1055 | pop_reg r26 | ||
1056 | pop_reg r27 | ||
1057 | pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28) | ||
1058 | /* r29 already restored above */ | ||
1059 | bnz lr, .Lkernel_return | ||
1060 | pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR | ||
1061 | pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP | ||
1062 | lw sp, sp | ||
1063 | iret | ||
1064 | |||
1065 | /* | ||
1066 | * We can't restore tp when in kernel mode, since a thread might | ||
1067 | * have migrated from another cpu and brought a stale tp value. | ||
1068 | */ | ||
1069 | .Lkernel_return: | ||
1070 | pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR | ||
1071 | lw sp, sp | ||
1072 | iret | ||
1073 | |||
1074 | /* Restore callee-saved registers from r34 to r51. */ | ||
1075 | .Lrestore_callees: | ||
1076 | addli sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29) | ||
1077 | pop_reg r34 | ||
1078 | pop_reg r35 | ||
1079 | pop_reg r36 | ||
1080 | pop_reg r37 | ||
1081 | pop_reg r38 | ||
1082 | pop_reg r39 | ||
1083 | pop_reg r40 | ||
1084 | pop_reg r41 | ||
1085 | pop_reg r42 | ||
1086 | pop_reg r43 | ||
1087 | pop_reg r44 | ||
1088 | pop_reg r45 | ||
1089 | pop_reg r46 | ||
1090 | pop_reg r47 | ||
1091 | pop_reg r48 | ||
1092 | pop_reg r49 | ||
1093 | pop_reg r50 | ||
1094 | pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51) | ||
1095 | j .Lcontinue_restore_regs | ||
1096 | |||
1097 | .Lwork_pending: | ||
1098 | /* Mask the reschedule flag */ | ||
1099 | andi r28, r29, _TIF_NEED_RESCHED | ||
1100 | |||
1101 | { | ||
1102 | /* | ||
1103 | * If the NEED_RESCHED flag is set, we call schedule(), which | ||
1104 | * may drop this context right here and go do something else. | ||
1105 | * On return, jump back to .Lresume_userspace and recheck. | ||
1106 | */ | ||
1107 | bz r28, .Lasync_tlb | ||
1108 | |||
1109 | /* Mask the async-tlb flag */ | ||
1110 | andi r28, r29, _TIF_ASYNC_TLB | ||
1111 | } | ||
1112 | |||
1113 | jal schedule | ||
1114 | FEEDBACK_REENTER(interrupt_return) | ||
1115 | |||
1116 | /* Reload the flags and check again */ | ||
1117 | j .Lresume_userspace | ||
1118 | |||
1119 | .Lasync_tlb: | ||
1120 | { | ||
1121 | bz r28, .Lneed_sigpending | ||
1122 | |||
1123 | /* Mask the sigpending flag */ | ||
1124 | andi r28, r29, _TIF_SIGPENDING | ||
1125 | } | ||
1126 | |||
1127 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1128 | jal do_async_page_fault | ||
1129 | FEEDBACK_REENTER(interrupt_return) | ||
1130 | |||
1131 | /* | ||
1132 | * Go restart the "resume userspace" process. We may have | ||
1133 | * fired a signal, and we need to disable interrupts again. | ||
1134 | */ | ||
1135 | j .Lresume_userspace | ||
1136 | |||
1137 | .Lneed_sigpending: | ||
1138 | /* | ||
1139 | * At this point we are either doing signal handling or single-step, | ||
1140 | * so either way make sure we have all the registers saved. | ||
1141 | */ | ||
1142 | push_extra_callee_saves r0 | ||
1143 | |||
1144 | { | ||
1145 | /* If no signal pending, skip to singlestep check */ | ||
1146 | bz r28, .Lneed_singlestep | ||
1147 | |||
1148 | /* Mask the singlestep flag */ | ||
1149 | andi r28, r29, _TIF_SINGLESTEP | ||
1150 | } | ||
1151 | |||
1152 | jal do_signal | ||
1153 | FEEDBACK_REENTER(interrupt_return) | ||
1154 | |||
1155 | /* Reload the flags and check again */ | ||
1156 | j .Lresume_userspace | ||
1157 | |||
1158 | .Lneed_singlestep: | ||
1159 | { | ||
1160 | /* Get a pointer to the EX1 field */ | ||
1161 | PTREGS_PTR(r29, PTREGS_OFFSET_EX1) | ||
1162 | |||
1163 | /* If we get here, our bit must be set. */ | ||
1164 | bz r28, .Lwork_confusion | ||
1165 | } | ||
1166 | /* If we are in priv mode, don't single step */ | ||
1167 | lw r28, r29 | ||
1168 | andi r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ | ||
1169 | bnz r28, .Lrestore_all | ||
1170 | |||
1171 | /* Allow interrupts within the single step code */ | ||
1172 | TRACE_IRQS_ON /* Note: clobbers registers r0-r29 */ | ||
1173 | IRQ_ENABLE(r20, r21) | ||
1174 | |||
1175 | /* try to single-step the current instruction */ | ||
1176 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1177 | jal single_step_once | ||
1178 | FEEDBACK_REENTER(interrupt_return) | ||
1179 | |||
1180 | /* Re-disable interrupts. TRACE_IRQS_OFF in .Lrestore_all. */ | ||
1181 | IRQ_DISABLE(r20,r21) | ||
1182 | |||
1183 | j .Lrestore_all | ||
1184 | |||
1185 | .Lwork_confusion: | ||
1186 | move r0, r28 | ||
1187 | panic "thread_info allwork flags unhandled on userspace resume: %#x" | ||
1188 | |||
1189 | STD_ENDPROC(interrupt_return) | ||
1190 | |||
1191 | /* | ||
1192 | * This interrupt variant clears the INT_INTCTRL_1 interrupt mask bit | ||
1193 | * before returning, so we can properly get more downcalls. | ||
1194 | */ | ||
1195 | .pushsection .text.handle_interrupt_downcall,"ax" | ||
1196 | handle_interrupt_downcall: | ||
1197 | finish_interrupt_save handle_interrupt_downcall | ||
1198 | check_single_stepping normal, .Ldispatch_downcall | ||
1199 | .Ldispatch_downcall: | ||
1200 | |||
1201 | /* Clear INTCTRL_1 from the set of interrupts we ever enable. */ | ||
1202 | GET_INTERRUPTS_ENABLED_MASK_PTR(r30) | ||
1203 | { | ||
1204 | addi r30, r30, 4 | ||
1205 | movei r31, INT_MASK(INT_INTCTRL_1) | ||
1206 | } | ||
1207 | { | ||
1208 | lw r20, r30 | ||
1209 | nor r21, r31, zero | ||
1210 | } | ||
1211 | and r20, r20, r21 | ||
1212 | sw r30, r20 | ||
1213 | |||
1214 | { | ||
1215 | jalr r0 | ||
1216 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1217 | } | ||
1218 | FEEDBACK_REENTER(handle_interrupt_downcall) | ||
1219 | |||
1220 | /* Allow INTCTRL_1 to be enabled next time we enable interrupts. */ | ||
1221 | lw r20, r30 | ||
1222 | or r20, r20, r31 | ||
1223 | sw r30, r20 | ||
1224 | |||
1225 | { | ||
1226 | movei r30, 0 /* not an NMI */ | ||
1227 | j interrupt_return | ||
1228 | } | ||
1229 | STD_ENDPROC(handle_interrupt_downcall) | ||
1230 | |||
1231 | /* | ||
1232 | * Some interrupts don't check for single stepping | ||
1233 | */ | ||
1234 | .pushsection .text.handle_interrupt_no_single_step,"ax" | ||
1235 | handle_interrupt_no_single_step: | ||
1236 | finish_interrupt_save handle_interrupt_no_single_step | ||
1237 | { | ||
1238 | jalr r0 | ||
1239 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1240 | } | ||
1241 | FEEDBACK_REENTER(handle_interrupt_no_single_step) | ||
1242 | { | ||
1243 | movei r30, 0 /* not an NMI */ | ||
1244 | j interrupt_return | ||
1245 | } | ||
1246 | STD_ENDPROC(handle_interrupt_no_single_step) | ||
1247 | |||
1248 | /* | ||
1249 | * "NMI" interrupts mask ALL interrupts before calling the | ||
1250 | * handler, and don't check thread flags, etc., on the way | ||
1251 | * back out. In general, the only things we do here for NMIs | ||
1252 | * are the register save/restore, fixing the PC if we were | ||
1253 | * doing single step, and the dataplane kernel-TLB management. | ||
1254 | * We don't (for example) deal with start/stop of the sched tick. | ||
1255 | */ | ||
1256 | .pushsection .text.handle_nmi,"ax" | ||
1257 | handle_nmi: | ||
1258 | finish_interrupt_save handle_nmi | ||
1259 | check_single_stepping normal, .Ldispatch_nmi | ||
1260 | .Ldispatch_nmi: | ||
1261 | { | ||
1262 | jalr r0 | ||
1263 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1264 | } | ||
1265 | FEEDBACK_REENTER(handle_nmi) | ||
1266 | j interrupt_return | ||
1267 | STD_ENDPROC(handle_nmi) | ||
1268 | |||
1269 | /* | ||
1270 | * Parallel code for syscalls to handle_interrupt. | ||
1271 | */ | ||
1272 | .pushsection .text.handle_syscall,"ax" | ||
1273 | handle_syscall: | ||
1274 | finish_interrupt_save handle_syscall | ||
1275 | |||
1276 | /* | ||
1277 | * Check whether we are single-stepping in user level. If so, we | ||
1278 | * need to restore the PC. | ||
1279 | */ | ||
1280 | check_single_stepping syscall, .Ldispatch_syscall | ||
1281 | .Ldispatch_syscall: | ||
1282 | |||
1283 | /* Enable irqs. */ | ||
1284 | TRACE_IRQS_ON | ||
1285 | IRQ_ENABLE(r20, r21) | ||
1286 | |||
1287 | /* Bump the counter for syscalls made on this tile. */ | ||
1288 | moveli r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET) | ||
1289 | auli r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET) | ||
1290 | add r20, r20, tp | ||
1291 | lw r21, r20 | ||
1292 | addi r21, r21, 1 | ||
1293 | sw r20, r21 | ||
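| /* | ||
| * (irq_stat is a per-cpu variable; adding tp, the per-cpu offset loaded | ||
| * in finish_interrupt_save, makes r20 point at this cpu's copy.) | ||
| */ | ||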
1294 | |||
1295 | /* Trace syscalls, if requested. */ | ||
1296 | GET_THREAD_INFO(r31) | ||
1297 | addi r31, r31, THREAD_INFO_FLAGS_OFFSET | ||
1298 | lw r30, r31 | ||
1299 | andi r30, r30, _TIF_SYSCALL_TRACE | ||
1300 | bzt r30, .Lrestore_syscall_regs | ||
1301 | jal do_syscall_trace | ||
1302 | FEEDBACK_REENTER(handle_syscall) | ||
1303 | |||
1304 | /* | ||
1305 | * We always reload our registers from the stack at this | ||
1306 | * point. They might be valid, if we didn't build with | ||
1307 | * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not | ||
1308 | * doing syscall tracing, but there are enough cases now that it | ||
1309 | * seems simplest just to do the reload unconditionally. | ||
1310 | */ | ||
1311 | .Lrestore_syscall_regs: | ||
1312 | PTREGS_PTR(r11, PTREGS_OFFSET_REG(0)) | ||
1313 | pop_reg r0, r11 | ||
1314 | pop_reg r1, r11 | ||
1315 | pop_reg r2, r11 | ||
1316 | pop_reg r3, r11 | ||
1317 | pop_reg r4, r11 | ||
1318 | pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5) | ||
1319 | pop_reg TREG_SYSCALL_NR_NAME, r11 | ||
1320 | |||
1321 | /* Ensure that the syscall number is within the legal range. */ | ||
1322 | moveli r21, __NR_syscalls | ||
1323 | { | ||
1324 | slt_u r21, TREG_SYSCALL_NR_NAME, r21 | ||
1325 | moveli r20, lo16(sys_call_table) | ||
1326 | } | ||
1327 | { | ||
1328 | bbns r21, .Linvalid_syscall | ||
1329 | auli r20, r20, ha16(sys_call_table) | ||
1330 | } | ||
1331 | s2a r20, TREG_SYSCALL_NR_NAME, r20 | ||
1332 | lw r20, r20 | ||
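| /* | ||
| * s2a scaled the syscall number by the 4-byte table-entry size, so the | ||
| * "lw" above leaves the handler address, sys_call_table[nr], in r20. | ||
| */ | ||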
1333 | |||
1334 | /* Jump to syscall handler. */ | ||
1335 | jalr r20; .Lhandle_syscall_link: | ||
1336 | FEEDBACK_REENTER(handle_syscall) | ||
1337 | |||
1338 | /* | ||
1339 | * Write our r0 onto the stack so it gets restored instead | ||
1340 | * of whatever the user had there before. | ||
1341 | */ | ||
1342 | PTREGS_PTR(r29, PTREGS_OFFSET_REG(0)) | ||
1343 | sw r29, r0 | ||
1344 | |||
1345 | /* Do syscall trace again, if requested. */ | ||
1346 | lw r30, r31 | ||
1347 | andi r30, r30, _TIF_SYSCALL_TRACE | ||
1348 | bzt r30, 1f | ||
1349 | jal do_syscall_trace | ||
1350 | FEEDBACK_REENTER(handle_syscall) | ||
1351 | 1: j .Lresume_userspace /* jump into middle of interrupt_return */ | ||
1352 | |||
1353 | .Linvalid_syscall: | ||
1354 | /* Report an invalid syscall back to the user program */ | ||
1355 | { | ||
1356 | PTREGS_PTR(r29, PTREGS_OFFSET_REG(0)) | ||
1357 | movei r28, -ENOSYS | ||
1358 | } | ||
1359 | sw r29, r28 | ||
1360 | j .Lresume_userspace /* jump into middle of interrupt_return */ | ||
1361 | STD_ENDPROC(handle_syscall) | ||
1362 | |||
1363 | /* Return the address for oprofile to suppress in backtraces. */ | ||
1364 | STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall) | ||
1365 | lnk r0 | ||
1366 | { | ||
1367 | addli r0, r0, .Lhandle_syscall_link - . | ||
1368 | jrp lr | ||
1369 | } | ||
1370 | STD_ENDPROC(handle_syscall_link_address) | ||
1371 | |||
1372 | STD_ENTRY(ret_from_fork) | ||
1373 | jal sim_notify_fork | ||
1374 | jal schedule_tail | ||
1375 | FEEDBACK_REENTER(ret_from_fork) | ||
1376 | j .Lresume_userspace /* jump into middle of interrupt_return */ | ||
1377 | STD_ENDPROC(ret_from_fork) | ||
1378 | |||
1379 | /* | ||
1380 | * Code for ill interrupt. | ||
1381 | */ | ||
1382 | .pushsection .text.handle_ill,"ax" | ||
1383 | handle_ill: | ||
1384 | finish_interrupt_save handle_ill | ||
1385 | |||
1386 | /* | ||
1387 | * Check whether we are single-stepping in user level. If so, we | ||
1388 | * need to restore the PC. | ||
1389 | */ | ||
1390 | check_single_stepping ill, .Ldispatch_normal_ill | ||
1391 | |||
1392 | { | ||
1393 | /* See if the PC is the 1st bundle in the buffer */ | ||
1394 | seq r25, r27, r26 | ||
1395 | |||
1396 | /* Point to the 2nd bundle in the buffer */ | ||
1397 | addi r26, r26, 8 | ||
1398 | } | ||
1399 | { | ||
1400 | /* Point to the original pc */ | ||
1401 | addi r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET | ||
1402 | |||
1403 | /* Branch if the PC is the 1st bundle in the buffer */ | ||
1404 | bnz r25, 3f | ||
1405 | } | ||
1406 | { | ||
1407 | /* See if the PC is the 2nd bundle of the buffer */ | ||
1408 | seq r25, r27, r26 | ||
1409 | |||
1410 | /* Point to the entry containing the next PC */ | ||
1411 | addi r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET | ||
1412 | } | ||
1413 | { | ||
1414 | /* Point to flags */ | ||
1415 | addi r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET | ||
1416 | |||
1417 | /* Branch if the PC is not in the second bundle (i.e. the third) */ | ||
1418 | bz r25, 2f | ||
1419 | } | ||
1420 | /* Load flags */ | ||
1421 | lw r25, r25 | ||
1422 | { | ||
1423 | /* | ||
1424 | * Get the offset for the register to restore | ||
1425 | * Note: the lower bound is 2, so we have implicit scaling by 4. | ||
1426 | * No multiplication of the register number by the size of a register | ||
1427 | * is needed. | ||
1428 | */ | ||
1429 | mm r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \ | ||
1430 | SINGLESTEP_STATE_TARGET_UB | ||
1431 | |||
1432 | /* Mask Rewrite_LR */ | ||
1433 | andi r25, r25, SINGLESTEP_STATE_MASK_UPDATE | ||
1434 | } | ||
1435 | { | ||
1436 | addi r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET | ||
1437 | |||
1438 | /* Don't rewrite temp register */ | ||
1439 | bz r25, 3f | ||
1440 | } | ||
1441 | { | ||
1442 | /* Get the temp value */ | ||
1443 | lw r29, r29 | ||
1444 | |||
1445 | /* Point to where the register is stored */ | ||
1446 | add r27, r27, sp | ||
1447 | } | ||
1448 | |||
1449 | /* Add in the C ABI save area size to the register offset */ | ||
1450 | addi r27, r27, C_ABI_SAVE_AREA_SIZE | ||
1451 | |||
1452 | /* Restore the user's register with the temp value */ | ||
1453 | sw r27, r29 | ||
1454 | j 3f | ||
1455 | |||
1456 | 2: | ||
1457 | /* Must be in the third bundle */ | ||
1458 | addi r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET | ||
1459 | |||
1460 | 3: | ||
1461 | /* set PC and continue */ | ||
1462 | lw r26, r24 | ||
1463 | sw r28, r26 | ||
1464 | |||
1465 | /* Clear TIF_SINGLESTEP */ | ||
1466 | GET_THREAD_INFO(r0) | ||
1467 | |||
1468 | addi r1, r0, THREAD_INFO_FLAGS_OFFSET | ||
1469 | { | ||
1470 | lw r2, r1 | ||
1471 | addi r0, r0, THREAD_INFO_TASK_OFFSET /* currently a no-op */ | ||
1472 | } | ||
1473 | andi r2, r2, ~_TIF_SINGLESTEP | ||
1474 | sw r1, r2 | ||
1475 | |||
1476 | /* Issue a sigtrap */ | ||
1477 | { | ||
1478 | lw r0, r0 /* indirect through thread_info to get the task_struct */ | ||
1479 | addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */ | ||
1480 | move r2, zero /* load error code into r2 */ | ||
1481 | } | ||
1482 | |||
1483 | jal send_sigtrap /* issue a SIGTRAP */ | ||
1484 | FEEDBACK_REENTER(handle_ill) | ||
1485 | j .Lresume_userspace /* jump into middle of interrupt_return */ | ||
1486 | |||
1487 | .Ldispatch_normal_ill: | ||
1488 | { | ||
1489 | jalr r0 | ||
1490 | PTREGS_PTR(r0, PTREGS_OFFSET_BASE) | ||
1491 | } | ||
1492 | FEEDBACK_REENTER(handle_ill) | ||
1493 | { | ||
1494 | movei r30, 0 /* not an NMI */ | ||
1495 | j interrupt_return | ||
1496 | } | ||
1497 | STD_ENDPROC(handle_ill) | ||
1498 | |||
1499 | .pushsection .rodata, "a" | ||
1500 | .align 8 | ||
1501 | bpt_code: | ||
1502 | bpt | ||
1503 | ENDPROC(bpt_code) | ||
1504 | .popsection | ||
1505 | |||
1506 | /* Various stub interrupt handlers and syscall handlers */ | ||
1507 | |||
1508 | STD_ENTRY_LOCAL(_kernel_double_fault) | ||
1509 | mfspr r1, EX_CONTEXT_1_0 | ||
1510 | move r2, lr | ||
1511 | move r3, sp | ||
1512 | move r4, r52 | ||
1513 | addi sp, sp, -C_ABI_SAVE_AREA_SIZE | ||
1514 | j kernel_double_fault | ||
1515 | STD_ENDPROC(_kernel_double_fault) | ||
1516 | |||
1517 | STD_ENTRY_LOCAL(bad_intr) | ||
1518 | mfspr r2, EX_CONTEXT_1_0 | ||
1519 | panic "Unhandled interrupt %#x: PC %#lx" | ||
1520 | STD_ENDPROC(bad_intr) | ||
1521 | |||
1522 | /* Put address of pt_regs in reg and jump. */ | ||
1523 | #define PTREGS_SYSCALL(x, reg) \ | ||
1524 | STD_ENTRY(x); \ | ||
1525 | { \ | ||
1526 | PTREGS_PTR(reg, PTREGS_OFFSET_BASE); \ | ||
1527 | j _##x \ | ||
1528 | }; \ | ||
1529 | STD_ENDPROC(x) | ||
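| /* | ||
| * For example, PTREGS_SYSCALL(sys_execve, r3) below emits a sys_execve | ||
| * entry that puts the pt_regs address in r3 (the register just past the | ||
| * syscall's normal arguments) and tail-jumps to _sys_execve. | ||
| */ | ||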
1530 | |||
1531 | PTREGS_SYSCALL(sys_execve, r3) | ||
1532 | PTREGS_SYSCALL(sys_sigaltstack, r2) | ||
1533 | PTREGS_SYSCALL(sys_rt_sigreturn, r0) | ||
1534 | |||
1535 | /* Save additional callee-saves to pt_regs, put address in reg and jump. */ | ||
1536 | #define PTREGS_SYSCALL_ALL_REGS(x, reg) \ | ||
1537 | STD_ENTRY(x); \ | ||
1538 | push_extra_callee_saves reg; \ | ||
1539 | j _##x; \ | ||
1540 | STD_ENDPROC(x) | ||
1541 | |||
1542 | PTREGS_SYSCALL_ALL_REGS(sys_fork, r0) | ||
1543 | PTREGS_SYSCALL_ALL_REGS(sys_vfork, r0) | ||
1544 | PTREGS_SYSCALL_ALL_REGS(sys_clone, r4) | ||
1545 | PTREGS_SYSCALL_ALL_REGS(sys_cmpxchg_badaddr, r1) | ||
1546 | |||
1547 | /* | ||
1548 | * This entrypoint is taken for the cmpxchg and atomic_update fast | ||
1549 | * swints. We may wish to generalize it to other fast swints at some | ||
1550 | * point, but for now there are just two very similar ones, which | ||
1551 | * makes it faster. | ||
1552 | * | ||
1553 | * The fast swint code is designed to have a small footprint. It does | ||
1554 | * not save or restore any GPRs, counting on the caller-save registers | ||
1555 | * to be available to it on entry. It does not modify any callee-save | ||
1556 | * registers (including "lr"). It does not check what PL it is being | ||
1557 | * called at, so you'd better not call it other than at PL0. | ||
1558 | * | ||
1559 | * It does not use the stack, but since it might be re-interrupted by | ||
1560 | * a page fault which would assume the stack was valid, it does | ||
1561 | * save/restore the stack pointer and zero it out to make sure it gets reset. | ||
1562 | * Since we always keep interrupts disabled, the hypervisor won't | ||
1563 | * clobber our EX_CONTEXT_1_x registers, so we don't save/restore them | ||
1564 | * (other than to advance the PC on return). | ||
1565 | * | ||
1566 | * We have to manually validate the user vs kernel address range | ||
1567 | * (since at PL1 we can read/write both), and for performance reasons | ||
1568 | * we don't allow cmpxchg on the fc000000 memory region, since we only | ||
1569 | * validate that the user address is below PAGE_OFFSET. | ||
1570 | * | ||
1571 | * We place it in the __HEAD section to ensure it is relatively | ||
1572 | * near to the intvec_SWINT_1 code (reachable by a conditional branch). | ||
1573 | * | ||
1574 | * Must match register usage in do_page_fault(). | ||
1575 | */ | ||
1576 | __HEAD | ||
1577 | .align 64 | ||
1578 | /* Align much later jump on the start of a cache line. */ | ||
1579 | #if !ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
1580 | nop; nop | ||
1581 | #endif | ||
1582 | ENTRY(sys_cmpxchg) | ||
1583 | |||
1584 | /* | ||
1585 | * Save "sp" and set it zero for any possible page fault. | ||
1586 | * | ||
1587 | * HACK: We want to both zero sp and check r0's alignment, | ||
1588 | * so we do both at once. If "sp" becomes nonzero we | ||
1589 | * know r0 is unaligned and branch to the error handler that | ||
1590 | * restores sp, so this is OK. | ||
1591 | * | ||
1592 | * ICS is disabled right now so having a garbage but nonzero | ||
1593 | * sp is OK, since we won't execute any faulting instructions | ||
1594 | * when it is nonzero. | ||
1595 | */ | ||
1596 | { | ||
1597 | move r27, sp | ||
1598 | andi sp, r0, 3 | ||
1599 | } | ||
1600 | |||
1601 | /* | ||
1602 | * Get the lock address in ATOMIC_LOCK_REG, and also validate that the | ||
1603 | * address is less than PAGE_OFFSET, since that won't trap at PL1. | ||
1604 | * We only use bits less than PAGE_SHIFT to avoid having to worry | ||
1605 | * about aliasing among multiple mappings of the same physical page, | ||
1606 | * and we ignore the low 3 bits so we have one lock that covers | ||
1607 | * both a cmpxchg64() and a cmpxchg() on either its low or high word. | ||
1608 | * NOTE: this code must match __atomic_hashed_lock() in lib/atomic.c. | ||
1609 | */ | ||
1610 | |||
1611 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
1612 | { | ||
1613 | /* Check for unaligned input. */ | ||
1614 | bnz sp, .Lcmpxchg_badaddr | ||
1615 | mm r25, r0, zero, 3, PAGE_SHIFT-1 | ||
1616 | } | ||
1617 | { | ||
1618 | crc32_32 r25, zero, r25 | ||
1619 | moveli r21, lo16(atomic_lock_ptr) | ||
1620 | } | ||
1621 | { | ||
1622 | auli r21, r21, ha16(atomic_lock_ptr) | ||
1623 | auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */ | ||
1624 | } | ||
1625 | { | ||
1626 | shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT | ||
1627 | slt_u r23, r0, r23 | ||
1628 | |||
1629 | /* | ||
1630 | * Ensure that the TLB is loaded before we take out the lock. | ||
1631 | * On TILEPro, this will start fetching the value all the way | ||
1632 | * into our L1 as well (and if it gets modified before we | ||
1633 | * grab the lock, it will be invalidated from our cache | ||
1634 | * before we reload it). On tile64, we'll start fetching it | ||
1635 | * into our L1 if we're the home, and if we're not, we'll | ||
1636 | * still at least start fetching it into the home's L2. | ||
1637 | */ | ||
1638 | lw r26, r0 | ||
1639 | } | ||
1640 | { | ||
1641 | s2a r21, r20, r21 | ||
1642 | bbns r23, .Lcmpxchg_badaddr | ||
1643 | } | ||
1644 | { | ||
1645 | lw r21, r21 | ||
1646 | seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64 | ||
1647 | andi r25, r25, ATOMIC_HASH_L2_SIZE - 1 | ||
1648 | } | ||
1649 | { | ||
1650 | /* Branch away at this point if we're doing a 64-bit cmpxchg. */ | ||
1651 | bbs r23, .Lcmpxchg64 | ||
1652 | andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */ | ||
1653 | } | ||
1654 | |||
1655 | { | ||
1656 | /* | ||
1657 | * We very carefully align the code that actually runs with | ||
1658 | * the lock held (nine bundles) so that we know it is all in | ||
1659 | * the icache when we start. This instruction (the jump) is | ||
1660 | * at the start of the first cache line, address zero mod 64; | ||
1661 | * we jump to somewhere in the second cache line to issue the | ||
1662 | * tns, then jump back to finish up. | ||
1663 | */ | ||
1664 | s2a ATOMIC_LOCK_REG_NAME, r25, r21 | ||
1665 | j .Lcmpxchg32_tns | ||
1666 | } | ||
1667 | |||
1668 | #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
1669 | { | ||
1670 | /* Check for unaligned input. */ | ||
1671 | bnz sp, .Lcmpxchg_badaddr | ||
1672 | auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */ | ||
1673 | } | ||
1674 | { | ||
1675 | /* | ||
1676 | * Slide bits into position for 'mm'. We want to ignore | ||
1677 | * the low 3 bits of r0, and consider only the next | ||
1678 | * ATOMIC_HASH_SHIFT bits. | ||
1679 | * Because of C pointer arithmetic, we want to compute this: | ||
1680 | * | ||
1681 | * ((char*)atomic_locks + | ||
1682 | *  (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2)) | ||
1683 | * | ||
1684 | * Instead of two shifts we just ">> 1", and use 'mm' | ||
1685 | * to ignore the low and high bits we don't want. | ||
1686 | */ | ||
1687 | shri r25, r0, 1 | ||
1688 | |||
1689 | slt_u r23, r0, r23 | ||
1690 | |||
1691 | /* | ||
1692 | * Ensure that the TLB is loaded before we take out the lock. | ||
1693 | * On tilepro, this will start fetching the value all the way | ||
1694 | * into our L1 as well (and if it gets modified before we | ||
1695 | * grab the lock, it will be invalidated from our cache | ||
1696 | * before we reload it). On tile64, we'll start fetching it | ||
1697 | * into our L1 if we're the home, and if we're not, we'll | ||
1698 | * still at least start fetching it into the home's L2. | ||
1699 | */ | ||
1700 | lw r26, r0 | ||
1701 | } | ||
1702 | { | ||
1703 | /* atomic_locks is page aligned so this suffices to get its addr. */ | ||
1704 | auli r21, zero, hi16(atomic_locks) | ||
1705 | |||
1706 | bbns r23, .Lcmpxchg_badaddr | ||
1707 | } | ||
1708 | { | ||
1709 | /* | ||
1710 | * Insert the hash bits into the page-aligned pointer. | ||
1711 | * ATOMIC_HASH_SHIFT is so big that we don't actually hash | ||
1712 | * the unmasked address bits, as that may cause unnecessary | ||
1713 | * collisions. | ||
1714 | */ | ||
1715 | mm ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1 | ||
1716 | |||
1717 | seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64 | ||
1718 | } | ||
1719 | { | ||
1720 | /* Branch away at this point if we're doing a 64-bit cmpxchg. */ | ||
1721 | bbs r23, .Lcmpxchg64 | ||
1722 | andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */ | ||
1723 | } | ||
1724 | { | ||
1725 | /* | ||
1726 | * We very carefully align the code that actually runs with | ||
1727 | * the lock held (nine bundles) so that we know it is all in | ||
1728 | * the icache when we start. This instruction (the jump) is | ||
1729 | * at the start of the first cache line, address zero mod 64; | ||
1730 | * we jump to somewhere in the second cache line to issue the | ||
1731 | * tns, then jump back to finish up. | ||
1732 | */ | ||
1733 | j .Lcmpxchg32_tns | ||
1734 | } | ||
1735 | |||
1736 | #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
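
The hash computation described in the comments above is easier to read in C. A minimal sketch, using the atomic_locks[] table and ATOMIC_HASH_SHIFT referenced in this file (the helper name is invented for illustration):

    /* Illustrative only: pick the spinlock guarding the user word at 'addr'.
     * Drop the low 3 bits, keep the next ATOMIC_HASH_SHIFT bits, and use
     * them to index the page-aligned atomic_locks[] array of ints. */
    static inline int *example_lock_for(unsigned long addr)
    {
            unsigned long hash = (addr >> 3) & ((1UL << ATOMIC_HASH_SHIFT) - 1);
            return &atomic_locks[hash];
    }
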
1737 | |||
1738 | ENTRY(__sys_cmpxchg_grab_lock) | ||
1739 | |||
1740 | /* | ||
1741 | * Perform the actual cmpxchg or atomic_update. | ||
1742 | * Note that __futex_mark_unlocked() in uClibc relies on | ||
1743 | * atomic_update() to always perform an "mf", so don't make | ||
1744 | * it optional or conditional without modifying that code. | ||
1745 | */ | ||
1746 | .Ldo_cmpxchg32: | ||
1747 | { | ||
1748 | lw r21, r0 | ||
1749 | seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update | ||
1750 | move r24, r2 | ||
1751 | } | ||
1752 | { | ||
1753 | seq r22, r21, r1 /* See if cmpxchg matches. */ | ||
1754 | and r25, r21, r1 /* If atomic_update, compute (*mem & mask) */ | ||
1755 | } | ||
1756 | { | ||
1757 | or r22, r22, r23 /* Skip compare branch for atomic_update. */ | ||
1758 | add r25, r25, r2 /* Compute (*mem & mask) + addend. */ | ||
1759 | } | ||
1760 | { | ||
1761 | mvnz r24, r23, r25 /* Use atomic_update value if appropriate. */ | ||
1762 | bbns r22, .Lcmpxchg32_mismatch | ||
1763 | } | ||
1764 | sw r0, r24 | ||
1765 | |||
1766 | /* Do slow mtspr here so the following "mf" waits less. */ | ||
1767 | { | ||
1768 | move sp, r27 | ||
1769 | mtspr EX_CONTEXT_1_0, r28 | ||
1770 | } | ||
1771 | mf | ||
1772 | |||
1773 | /* The following instruction is the start of the second cache line. */ | ||
1774 | { | ||
1775 | move r0, r21 | ||
1776 | sw ATOMIC_LOCK_REG_NAME, zero | ||
1777 | } | ||
1778 | iret | ||
1779 | |||
1780 | /* Duplicated code here in the case where we don't overlap "mf" */ | ||
1781 | .Lcmpxchg32_mismatch: | ||
1782 | { | ||
1783 | move r0, r21 | ||
1784 | sw ATOMIC_LOCK_REG_NAME, zero | ||
1785 | } | ||
1786 | { | ||
1787 | move sp, r27 | ||
1788 | mtspr EX_CONTEXT_1_0, r28 | ||
1789 | } | ||
1790 | iret | ||
1791 | |||
1792 | /* | ||
1793 | * The locking code is the same for 32-bit cmpxchg/atomic_update, | ||
1794 | * and for 64-bit cmpxchg. We provide it as a macro and put | ||
1795 | * it into both versions. We can't share the code literally | ||
1796 | * since it depends on having the right branch-back address. | ||
1797 | * Note that the first few instructions should share the cache | ||
1798 | * line with the second half of the actual locked code. | ||
1799 | */ | ||
1800 | .macro cmpxchg_lock, bitwidth | ||
1801 | |||
1802 | /* Lock; if we succeed, jump back up to the read-modify-write. */ | ||
1803 | #ifdef CONFIG_SMP | ||
1804 | tns r21, ATOMIC_LOCK_REG_NAME | ||
1805 | #else | ||
1806 | /* | ||
1807 | * Non-SMP preserves all the lock infrastructure, to keep the | ||
1808 | * code simpler for the interesting (SMP) case. However, we do | ||
1809 | * one small optimization here and in atomic_asm.S, which is | ||
1810 | * to fake out acquiring the actual lock in the atomic_lock table. | ||
1811 | */ | ||
1812 | movei r21, 0 | ||
1813 | #endif | ||
1814 | |||
1815 | /* Issue the slow SPR here while the tns result is in flight. */ | ||
1816 | mfspr r28, EX_CONTEXT_1_0 | ||
1817 | |||
1818 | { | ||
1819 | addi r28, r28, 8 /* return to the instruction after the swint1 */ | ||
1820 | bzt r21, .Ldo_cmpxchg\bitwidth | ||
1821 | } | ||
1822 | /* | ||
1823 | * The preceding instruction is the last thing that must be | ||
1824 | * on the second cache line. | ||
1825 | */ | ||
1826 | |||
1827 | #ifdef CONFIG_SMP | ||
1828 | /* | ||
1829 | * We failed to acquire the tns lock on our first try. Now use | ||
1830 | * bounded exponential backoff to retry, like __atomic_spinlock(). | ||
1831 | */ | ||
1832 | { | ||
1833 | moveli r23, 2048 /* maximum backoff time in cycles */ | ||
1834 | moveli r25, 32 /* starting backoff time in cycles */ | ||
1835 | } | ||
1836 | 1: mfspr r26, CYCLE_LOW /* get start point for this backoff */ | ||
1837 | 2: mfspr r22, CYCLE_LOW /* test to see if we've backed off enough */ | ||
1838 | sub r22, r22, r26 | ||
1839 | slt r22, r22, r25 | ||
1840 | bbst r22, 2b | ||
1841 | { | ||
1842 | shli r25, r25, 1 /* double the backoff; retry the tns */ | ||
1843 | tns r21, ATOMIC_LOCK_REG_NAME | ||
1844 | } | ||
1845 | slt r26, r23, r25 /* is the proposed backoff too big? */ | ||
1846 | { | ||
1847 | mvnz r25, r26, r23 | ||
1848 | bzt r21, .Ldo_cmpxchg\bitwidth | ||
1849 | } | ||
1850 | j 1b | ||
1851 | #endif /* CONFIG_SMP */ | ||
1852 | .endm | ||
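
In C terms, the SMP branch of the macro above is a test-and-set spin with bounded exponential backoff. A rough sketch, assuming hypothetical test_and_set() and read_cycle_low() helpers in place of the tns instruction and the CYCLE_LOW SPR:

    /* Illustrative only: acquire a lock word with backoff bounded between
     * 32 and 2048 cycles, doubling after each failed attempt. */
    static inline void example_backoff_lock(volatile int *lock)
    {
            unsigned int backoff = 32, bound = 2048;

            while (test_and_set(lock) != 0) {            /* tns-like primitive */
                    unsigned int start = read_cycle_low();   /* like CYCLE_LOW */
                    while (read_cycle_low() - start < backoff)
                            ;                            /* spin out the delay */
                    backoff *= 2;
                    if (backoff > bound)
                            backoff = bound;
            }
    }
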
1853 | |||
1854 | .Lcmpxchg32_tns: | ||
1855 | cmpxchg_lock 32 | ||
1856 | |||
1857 | /* | ||
1858 | * This code is invoked from sys_cmpxchg after most of the | ||
1859 | * preconditions have been checked. We still need to check | ||
1860 | * that r0 is 8-byte aligned, since if it's not we won't | ||
1861 | * actually be atomic. However, ATOMIC_LOCK_REG has the atomic | ||
1862 | * lock pointer and r27/r28 have the saved SP/PC. | ||
1863 | * r23 is holding "r0 & 7" so we can test for alignment. | ||
1864 | * The compare value is in r2/r3; the new value is in r4/r5. | ||
1865 | * On return, we must put the old value in r0/r1. | ||
1866 | */ | ||
1867 | .align 64 | ||
1868 | .Lcmpxchg64: | ||
1869 | { | ||
1870 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
1871 | s2a ATOMIC_LOCK_REG_NAME, r25, r21 | ||
1872 | #endif | ||
1873 | bzt r23, .Lcmpxchg64_tns | ||
1874 | } | ||
1875 | j .Lcmpxchg_badaddr | ||
1876 | |||
1877 | .Ldo_cmpxchg64: | ||
1878 | { | ||
1879 | lw r21, r0 | ||
1880 | addi r25, r0, 4 | ||
1881 | } | ||
1882 | { | ||
1883 | lw r1, r25 | ||
1884 | } | ||
1885 | seq r26, r21, r2 | ||
1886 | { | ||
1887 | bz r26, .Lcmpxchg64_mismatch | ||
1888 | seq r26, r1, r3 | ||
1889 | } | ||
1890 | { | ||
1891 | bz r26, .Lcmpxchg64_mismatch | ||
1892 | } | ||
1893 | sw r0, r4 | ||
1894 | sw r25, r5 | ||
1895 | |||
1896 | /* | ||
1897 | * The 32-bit path provides optimized "match" and "mismatch" | ||
1898 | * iret paths, but we don't have enough bundles in this cache line | ||
1899 | * to do that, so we just make even the "mismatch" path do an "mf". | ||
1900 | */ | ||
1901 | .Lcmpxchg64_mismatch: | ||
1902 | { | ||
1903 | move sp, r27 | ||
1904 | mtspr EX_CONTEXT_1_0, r28 | ||
1905 | } | ||
1906 | mf | ||
1907 | { | ||
1908 | move r0, r21 | ||
1909 | sw ATOMIC_LOCK_REG_NAME, zero | ||
1910 | } | ||
1911 | iret | ||
1912 | |||
1913 | .Lcmpxchg64_tns: | ||
1914 | cmpxchg_lock 64 | ||
1915 | |||
1916 | |||
1917 | /* | ||
1918 | * Reset sp and revector to sys_cmpxchg_badaddr(), which will | ||
1919 | * just raise the appropriate signal and exit. Doing it this | ||
1920 | * way means we don't have to duplicate the code in intvec.S's | ||
1921 | * int_hand macro that locates the top of the stack. | ||
1922 | */ | ||
1923 | .Lcmpxchg_badaddr: | ||
1924 | { | ||
1925 | moveli TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr | ||
1926 | move sp, r27 | ||
1927 | } | ||
1928 | j intvec_SWINT_1 | ||
1929 | ENDPROC(sys_cmpxchg) | ||
1930 | ENTRY(__sys_cmpxchg_end) | ||
1931 | |||
1932 | |||
1933 | /* The single-step support may need to read all the registers. */ | ||
1934 | int_unalign: | ||
1935 | push_extra_callee_saves r0 | ||
1936 | j do_trap | ||
1937 | |||
1938 | /* Include .intrpt1 array of interrupt vectors */ | ||
1939 | .section ".intrpt1", "ax" | ||
1940 | |||
1941 | #define op_handle_perf_interrupt bad_intr | ||
1942 | #define op_handle_aux_perf_interrupt bad_intr | ||
1943 | |||
1944 | #define do_hardwall_trap bad_intr | ||
1945 | |||
1946 | int_hand INT_ITLB_MISS, ITLB_MISS, \ | ||
1947 | do_page_fault, handle_interrupt_no_single_step | ||
1948 | int_hand INT_MEM_ERROR, MEM_ERROR, bad_intr | ||
1949 | int_hand INT_ILL, ILL, do_trap, handle_ill | ||
1950 | int_hand INT_GPV, GPV, do_trap | ||
1951 | int_hand INT_SN_ACCESS, SN_ACCESS, do_trap | ||
1952 | int_hand INT_IDN_ACCESS, IDN_ACCESS, do_trap | ||
1953 | int_hand INT_UDN_ACCESS, UDN_ACCESS, do_trap | ||
1954 | int_hand INT_IDN_REFILL, IDN_REFILL, bad_intr | ||
1955 | int_hand INT_UDN_REFILL, UDN_REFILL, bad_intr | ||
1956 | int_hand INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr | ||
1957 | int_hand INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr | ||
1958 | int_hand INT_SWINT_3, SWINT_3, do_trap | ||
1959 | int_hand INT_SWINT_2, SWINT_2, do_trap | ||
1960 | int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall | ||
1961 | int_hand INT_SWINT_0, SWINT_0, do_trap | ||
1962 | int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign | ||
1963 | int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault | ||
1964 | int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault | ||
1965 | int_hand INT_DMATLB_MISS, DMATLB_MISS, do_page_fault | ||
1966 | int_hand INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault | ||
1967 | int_hand INT_SNITLB_MISS, SNITLB_MISS, do_page_fault | ||
1968 | int_hand INT_SN_NOTIFY, SN_NOTIFY, bad_intr | ||
1969 | int_hand INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap | ||
1970 | int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr | ||
1971 | int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap | ||
1972 | int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt | ||
1973 | int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr | ||
1974 | int_hand INT_UDN_TIMER, UDN_TIMER, bad_intr | ||
1975 | int_hand INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr | ||
1976 | int_hand INT_IDN_CA, IDN_CA, bad_intr | ||
1977 | int_hand INT_UDN_CA, UDN_CA, bad_intr | ||
1978 | int_hand INT_IDN_AVAIL, IDN_AVAIL, bad_intr | ||
1979 | int_hand INT_UDN_AVAIL, UDN_AVAIL, bad_intr | ||
1980 | int_hand INT_PERF_COUNT, PERF_COUNT, \ | ||
1981 | op_handle_perf_interrupt, handle_nmi | ||
1982 | int_hand INT_INTCTRL_3, INTCTRL_3, bad_intr | ||
1983 | int_hand INT_INTCTRL_2, INTCTRL_2, bad_intr | ||
1984 | dc_dispatch INT_INTCTRL_1, INTCTRL_1 | ||
1985 | int_hand INT_INTCTRL_0, INTCTRL_0, bad_intr | ||
1986 | int_hand INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \ | ||
1987 | hv_message_intr, handle_interrupt_downcall | ||
1988 | int_hand INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \ | ||
1989 | tile_dev_intr, handle_interrupt_downcall | ||
1990 | int_hand INT_I_ASID, I_ASID, bad_intr | ||
1991 | int_hand INT_D_ASID, D_ASID, bad_intr | ||
1992 | int_hand INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \ | ||
1993 | do_page_fault, handle_interrupt_downcall | ||
1994 | int_hand INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \ | ||
1995 | do_page_fault, handle_interrupt_downcall | ||
1996 | int_hand INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \ | ||
1997 | do_page_fault, handle_interrupt_downcall | ||
1998 | int_hand INT_SN_CPL, SN_CPL, bad_intr | ||
1999 | int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap | ||
2000 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
2001 | int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \ | ||
2002 | op_handle_aux_perf_interrupt, handle_nmi | ||
2003 | #endif | ||
2004 | |||
2005 | /* Synthetic interrupt delivered only by the simulator */ | ||
2006 | int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint | ||
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c new file mode 100644 index 000000000000..24cc6b2abc2c --- /dev/null +++ b/arch/tile/kernel/irq.c | |||
@@ -0,0 +1,227 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/irq.h> | ||
19 | #include <linux/kernel_stat.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | #include <hv/drv_pcie_rc_intf.h> | ||
22 | |||
23 | /* | ||
24 | * The set of interrupts we enable for raw_local_irq_enable(). | ||
25 | * This is initialized with a single sentinel interrupt that the kernel | ||
26 | * doesn't actually use. During kernel init, | ||
27 | * interrupts are added as the kernel gets prepared to support them. | ||
28 | * NOTE: we could probably initialize them all statically up front. | ||
29 | */ | ||
30 | DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) = | ||
31 | INITIAL_INTERRUPTS_ENABLED; | ||
32 | EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask); | ||
33 | |||
34 | /* Define per-tile device interrupt state */ | ||
35 | DEFINE_PER_CPU(HV_IntrState, dev_intr_state); | ||
36 | |||
37 | DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp; | ||
38 | EXPORT_PER_CPU_SYMBOL(irq_stat); | ||
39 | |||
40 | |||
41 | |||
42 | /* | ||
43 | * Interrupt dispatcher, invoked upon a hypervisor device interrupt downcall | ||
44 | */ | ||
45 | void tile_dev_intr(struct pt_regs *regs, int intnum) | ||
46 | { | ||
47 | int irq; | ||
48 | |||
49 | /* | ||
50 | * Get the device interrupt pending mask from where the hypervisor | ||
51 | * has tucked it away for us. | ||
52 | */ | ||
53 | unsigned long pending_dev_intr_mask = __insn_mfspr(SPR_SYSTEM_SAVE_1_3); | ||
54 | |||
55 | |||
56 | /* Track time spent here in an interrupt context. */ | ||
57 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
58 | irq_enter(); | ||
59 | |||
60 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
61 | /* Debugging check for stack overflow: less than 1/8th stack free? */ | ||
62 | { | ||
63 | long sp = stack_pointer - (long) current_thread_info(); | ||
64 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { | ||
65 | printk(KERN_EMERG "tile_dev_intr: " | ||
66 | "stack overflow: %ld\n", | ||
67 | sp - sizeof(struct thread_info)); | ||
68 | dump_stack(); | ||
69 | } | ||
70 | } | ||
71 | #endif | ||
72 | |||
73 | for (irq = 0; pending_dev_intr_mask; ++irq) { | ||
74 | if (pending_dev_intr_mask & 0x1) { | ||
75 | generic_handle_irq(irq); | ||
76 | |||
77 | /* Count device irqs; IPIs are counted elsewhere. */ | ||
78 | if (irq > HV_MAX_IPI_INTERRUPT) | ||
79 | __get_cpu_var(irq_stat).irq_dev_intr_count++; | ||
80 | } | ||
81 | pending_dev_intr_mask >>= 1; | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * Track time spent against the current process again and | ||
86 | * process any softirqs if they are waiting. | ||
87 | */ | ||
88 | irq_exit(); | ||
89 | set_irq_regs(old_regs); | ||
90 | } | ||
91 | |||
92 | |||
93 | /* Mask an interrupt. */ | ||
94 | static void hv_dev_irq_mask(unsigned int irq) | ||
95 | { | ||
96 | HV_IntrState *p_intr_state = &__get_cpu_var(dev_intr_state); | ||
97 | hv_disable_intr(p_intr_state, 1 << irq); | ||
98 | } | ||
99 | |||
100 | /* Unmask an interrupt. */ | ||
101 | static void hv_dev_irq_unmask(unsigned int irq) | ||
102 | { | ||
103 | /* Re-enable the hypervisor to generate interrupts. */ | ||
104 | HV_IntrState *p_intr_state = &__get_cpu_var(dev_intr_state); | ||
105 | hv_enable_intr(p_intr_state, 1 << irq); | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * The HV doesn't latch incoming interrupts while an interrupt is | ||
110 | * disabled, so we need to reenable interrupts before running the | ||
111 | * handler. | ||
112 | * | ||
113 | * ISSUE: Enabling the interrupt this early avoids any race conditions | ||
114 | * but introduces the possibility of nested interrupt stack overflow. | ||
115 | * An imminent change to the HV IRQ model will fix this. | ||
116 | */ | ||
117 | static void hv_dev_irq_ack(unsigned int irq) | ||
118 | { | ||
119 | hv_dev_irq_unmask(irq); | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * Since ack() reenables interrupts, there's nothing to do at eoi(). | ||
124 | */ | ||
125 | static void hv_dev_irq_eoi(unsigned int irq) | ||
126 | { | ||
127 | } | ||
128 | |||
129 | static struct irq_chip hv_dev_irq_chip = { | ||
130 | .typename = "hv_dev_irq_chip", | ||
131 | .ack = hv_dev_irq_ack, | ||
132 | .mask = hv_dev_irq_mask, | ||
133 | .unmask = hv_dev_irq_unmask, | ||
134 | .eoi = hv_dev_irq_eoi, | ||
135 | }; | ||
136 | |||
137 | static struct irqaction resched_action = { | ||
138 | .handler = handle_reschedule_ipi, | ||
139 | .name = "resched", | ||
140 | .dev_id = handle_reschedule_ipi /* unique token */, | ||
141 | }; | ||
142 | |||
143 | void __init init_IRQ(void) | ||
144 | { | ||
145 | /* Bind IPI irqs. Does this belong somewhere else in init? */ | ||
146 | tile_irq_activate(IRQ_RESCHEDULE); | ||
147 | BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action)); | ||
148 | } | ||
149 | |||
150 | void __cpuinit init_per_tile_IRQs(void) | ||
151 | { | ||
152 | int rc; | ||
153 | |||
154 | /* Set the pointer to the per-tile device interrupt state. */ | ||
155 | HV_IntrState *sv_ptr = &__get_cpu_var(dev_intr_state); | ||
156 | rc = hv_dev_register_intr_state(sv_ptr); | ||
157 | if (rc != HV_OK) | ||
158 | panic("hv_dev_register_intr_state: error %d", rc); | ||
159 | |||
160 | } | ||
161 | |||
162 | void tile_irq_activate(unsigned int irq) | ||
163 | { | ||
164 | /* | ||
165 | * Paravirtualized drivers can call up to the HV to find out | ||
166 | * which irq they're associated with. The HV interface | ||
167 | * doesn't provide a generic call for discovering all valid | ||
168 | * IRQs, so drivers must call this method to initialize newly | ||
169 | * discovered IRQs. | ||
170 | * | ||
171 | * We could also just initialize all 32 IRQs at startup, but | ||
172 | * doing so would lead to a kernel fault if an unexpected | ||
173 | * interrupt fires and jumps to a NULL action. By deferring | ||
174 | * the set_irq_chip_and_handler() call, unexpected IRQs are | ||
175 | * handled properly by handle_bad_irq(). | ||
176 | */ | ||
177 | hv_dev_irq_mask(irq); | ||
178 | set_irq_chip_and_handler(irq, &hv_dev_irq_chip, handle_percpu_irq); | ||
179 | } | ||
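
As the comment above describes, a paravirtualized driver that learns its IRQ number from the hypervisor is expected to call tile_irq_activate() before requesting the IRQ. A minimal sketch, with the driver names and the HV query invented for illustration:

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            /* ... service the device ... */
            return IRQ_HANDLED;
    }

    static int example_driver_init(void)
    {
            int irq = example_hv_get_irq();   /* hypothetical HV lookup */

            tile_irq_activate(irq);           /* install chip + handler */
            return request_irq(irq, example_isr, 0, "example", NULL);
    }
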
180 | |||
181 | void ack_bad_irq(unsigned int irq) | ||
182 | { | ||
183 | printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq); | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * Generic, controller-independent functions: | ||
188 | */ | ||
189 | |||
190 | int show_interrupts(struct seq_file *p, void *v) | ||
191 | { | ||
192 | int i = *(loff_t *) v, j; | ||
193 | struct irqaction *action; | ||
194 | unsigned long flags; | ||
195 | |||
196 | if (i == 0) { | ||
197 | seq_printf(p, " "); | ||
198 | for (j = 0; j < NR_CPUS; j++) | ||
199 | if (cpu_online(j)) | ||
200 | seq_printf(p, "CPU%-8d", j); | ||
201 | seq_putc(p, '\n'); | ||
202 | } | ||
203 | |||
204 | if (i < NR_IRQS) { | ||
205 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
206 | action = irq_desc[i].action; | ||
207 | if (!action) | ||
208 | goto skip; | ||
209 | seq_printf(p, "%3d: ", i); | ||
210 | #ifndef CONFIG_SMP | ||
211 | seq_printf(p, "%10u ", kstat_irqs(i)); | ||
212 | #else | ||
213 | for_each_online_cpu(j) | ||
214 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
215 | #endif | ||
216 | seq_printf(p, " %14s", irq_desc[i].chip->typename); | ||
217 | seq_printf(p, " %s", action->name); | ||
218 | |||
219 | for (action = action->next; action; action = action->next) | ||
220 | seq_printf(p, ", %s", action->name); | ||
221 | |||
222 | seq_putc(p, '\n'); | ||
223 | skip: | ||
224 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
225 | } | ||
226 | return 0; | ||
227 | } | ||
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c new file mode 100644 index 000000000000..ed3e1cb8dcc4 --- /dev/null +++ b/arch/tile/kernel/machine_kexec.c | |||
@@ -0,0 +1,291 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * based on machine_kexec.c from other architectures in linux-2.6.18 | ||
15 | */ | ||
16 | |||
17 | #include <linux/mm.h> | ||
18 | #include <linux/kexec.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/reboot.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/vmalloc.h> | ||
23 | #include <linux/cpumask.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/elf.h> | ||
26 | #include <linux/highmem.h> | ||
27 | #include <linux/mmu_context.h> | ||
28 | #include <linux/io.h> | ||
29 | #include <linux/timex.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/pgalloc.h> | ||
32 | #include <asm/cacheflush.h> | ||
33 | #include <asm/checksum.h> | ||
34 | #include <hv/hypervisor.h> | ||
35 | |||
36 | |||
37 | /* | ||
38 | * This stuff is not in elf.h and is not in any other kernel include. | ||
39 | * This stuff is needed below in the little boot notes parser to | ||
40 | * extract the command line so we can pass it to the hypervisor. | ||
41 | */ | ||
42 | struct Elf32_Bhdr { | ||
43 | Elf32_Word b_signature; | ||
44 | Elf32_Word b_size; | ||
45 | Elf32_Half b_checksum; | ||
46 | Elf32_Half b_records; | ||
47 | }; | ||
48 | #define ELF_BOOT_MAGIC 0x0E1FB007 | ||
49 | #define EBN_COMMAND_LINE 0x00000004 | ||
50 | #define roundupsz(X) (((X) + 3) & ~3) | ||
51 | |||
52 | /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ | ||
53 | |||
54 | |||
55 | void machine_shutdown(void) | ||
56 | { | ||
57 | /* | ||
58 | * Normally we would stop all the other processors here, but | ||
59 | * the check in machine_kexec_prepare below ensures we'll only | ||
60 | * get this far if we've been booted with "nosmp" on the | ||
61 | * command line or without CONFIG_SMP so there's nothing to do | ||
62 | * here (for now). | ||
63 | */ | ||
64 | } | ||
65 | |||
66 | void machine_crash_shutdown(struct pt_regs *regs) | ||
67 | { | ||
68 | /* | ||
69 | * Cannot happen. This type of kexec is disabled on this | ||
70 | * architecture (and enforced in machine_kexec_prepare below). | ||
71 | */ | ||
72 | } | ||
73 | |||
74 | |||
75 | int machine_kexec_prepare(struct kimage *image) | ||
76 | { | ||
77 | if (num_online_cpus() > 1) { | ||
78 | printk(KERN_WARNING "%s: detected attempt to kexec " | ||
79 | "with num_online_cpus() > 1\n", | ||
80 | __func__); | ||
81 | return -ENOSYS; | ||
82 | } | ||
83 | if (image->type != KEXEC_TYPE_DEFAULT) { | ||
84 | printk(KERN_WARNING "%s: detected attempt to kexec " | ||
85 | "with unsupported type: %d\n", | ||
86 | __func__, | ||
87 | image->type); | ||
88 | return -ENOSYS; | ||
89 | } | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | void machine_kexec_cleanup(struct kimage *image) | ||
94 | { | ||
95 | /* | ||
96 | * We did nothing in machine_kexec_prepare, | ||
97 | * so we have nothing to do here. | ||
98 | */ | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * If we can find elf boot notes on this page, return the command | ||
103 | * line. Otherwise, silently return null. Somewhat kludgy, but no | ||
104 | * good way to do this without significantly rearchitecting the | ||
105 | * architecture-independent kexec code. | ||
106 | */ | ||
107 | |||
108 | static unsigned char *kexec_bn2cl(void *pg) | ||
109 | { | ||
110 | struct Elf32_Bhdr *bhdrp; | ||
111 | Elf32_Nhdr *nhdrp; | ||
112 | unsigned char *desc; | ||
113 | unsigned char *command_line; | ||
114 | __sum16 csum; | ||
115 | |||
116 | bhdrp = (struct Elf32_Bhdr *) pg; | ||
117 | |||
118 | /* | ||
119 | * This routine is invoked for every source page, so make | ||
120 | * sure to quietly ignore every impossible page. | ||
121 | */ | ||
122 | if (bhdrp->b_signature != ELF_BOOT_MAGIC || | ||
123 | bhdrp->b_size > PAGE_SIZE) | ||
124 | return 0; | ||
125 | |||
126 | /* | ||
127 | * If we get a checksum mismatch, it's possible that this is | ||
128 | * just a false positive, but relatively unlikely. We dump | ||
129 | * out the contents of the section so we can diagnose better. | ||
130 | */ | ||
131 | csum = ip_compute_csum(pg, bhdrp->b_size); | ||
132 | if (csum != 0) { | ||
133 | int i; | ||
134 | unsigned char *p = pg; | ||
135 | int nbytes = min((Elf32_Word)1000, bhdrp->b_size); | ||
136 | printk(KERN_INFO "%s: bad checksum %#x\n", __func__, csum); | ||
137 | printk(KERN_INFO "bytes (%d):", bhdrp->b_size); | ||
138 | for (i = 0; i < nbytes; ++i) | ||
139 | printk(" %02x", p[i]); | ||
140 | if (bhdrp->b_size != nbytes) | ||
141 | printk(" ..."); | ||
142 | printk("\n"); | ||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | nhdrp = (Elf32_Nhdr *) (bhdrp + 1); | ||
147 | |||
148 | while (nhdrp->n_type != EBN_COMMAND_LINE) { | ||
149 | |||
150 | desc = (unsigned char *) (nhdrp + 1); | ||
151 | desc += roundupsz(nhdrp->n_descsz); | ||
152 | |||
153 | nhdrp = (Elf32_Nhdr *) desc; | ||
154 | |||
155 | /* still in bounds? */ | ||
156 | if ((unsigned char *) (nhdrp + 1) > | ||
157 | ((unsigned char *) pg) + bhdrp->b_size) { | ||
158 | |||
159 | printk(KERN_INFO "%s: out of bounds\n", __func__); | ||
160 | return 0; | ||
161 | } | ||
162 | } | ||
163 | |||
164 | command_line = (unsigned char *) (nhdrp + 1); | ||
165 | desc = command_line; | ||
166 | |||
167 | while (*desc != '\0') { | ||
168 | desc++; | ||
169 | if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) { | ||
170 | printk(KERN_INFO "%s: ran off end of page\n", | ||
171 | __func__); | ||
172 | return 0; | ||
173 | } | ||
174 | } | ||
175 | |||
176 | return command_line; | ||
177 | } | ||
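
For reference, the page that kexec_bn2cl() walks is laid out roughly as follows (field values are examples only); the loop skips note headers until it reaches the EBN_COMMAND_LINE note and returns a pointer to its NUL-terminated payload:

    /*
     *  struct Elf32_Bhdr { b_signature = ELF_BOOT_MAGIC, b_size = <bytes>, ... }
     *  Elf32_Nhdr        { n_type = <other>, n_descsz = <len> }  + desc, padded to 4
     *  ...
     *  Elf32_Nhdr        { n_type = EBN_COMMAND_LINE, ... }      + e.g. "console=... root=..."
     */
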
178 | |||
179 | static void kexec_find_and_set_command_line(struct kimage *image) | ||
180 | { | ||
181 | kimage_entry_t *ptr, entry; | ||
182 | |||
183 | unsigned char *command_line = 0; | ||
184 | unsigned char *r; | ||
185 | HV_Errno hverr; | ||
186 | |||
187 | for (ptr = &image->head; | ||
188 | (entry = *ptr) && !(entry & IND_DONE); | ||
189 | ptr = (entry & IND_INDIRECTION) ? | ||
190 | phys_to_virt((entry & PAGE_MASK)) : ptr + 1) { | ||
191 | |||
192 | if ((entry & IND_SOURCE)) { | ||
193 | void *va = | ||
194 | kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0); | ||
195 | r = kexec_bn2cl(va); | ||
196 | if (r) { | ||
197 | command_line = r; | ||
198 | break; | ||
199 | } | ||
200 | kunmap_atomic(va, KM_USER0); | ||
201 | } | ||
202 | } | ||
203 | |||
204 | if (command_line != 0) { | ||
205 | printk(KERN_INFO "setting new command line to \"%s\"\n", | ||
206 | command_line); | ||
207 | |||
208 | hverr = hv_set_command_line( | ||
209 | (HV_VirtAddr) command_line, strlen(command_line)); | ||
210 | kunmap_atomic(command_line, KM_USER0); | ||
211 | } else { | ||
212 | printk(KERN_INFO "%s: no command line found; making empty\n", | ||
213 | __func__); | ||
214 | hverr = hv_set_command_line((HV_VirtAddr) command_line, 0); | ||
215 | } | ||
216 | if (hverr) { | ||
217 | printk(KERN_WARNING | ||
218 | "%s: call to hv_set_command_line returned error: %d\n", | ||
219 | __func__, hverr); | ||
220 | |||
221 | } | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * The kexec code range-checks all its PAs, so to avoid having it run | ||
226 | * amok and allocate memory and then sequester it from every other | ||
227 | * controller, we force it to come from controller zero. We also | ||
228 | * disable the oom-killer since if we do end up running out of memory, | ||
229 | * that almost certainly won't help. | ||
230 | */ | ||
231 | struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order) | ||
232 | { | ||
233 | gfp_mask |= __GFP_THISNODE | __GFP_NORETRY; | ||
234 | return alloc_pages_node(0, gfp_mask, order); | ||
235 | } | ||
236 | |||
237 | static void setup_quasi_va_is_pa(void) | ||
238 | { | ||
239 | HV_PTE *pgtable; | ||
240 | HV_PTE pte; | ||
241 | int i; | ||
242 | |||
243 | /* | ||
244 | * Flush our TLB to prevent conflicts between the previous contents | ||
245 | * and the new stuff we're about to add. | ||
246 | */ | ||
247 | local_flush_tlb_all(); | ||
248 | |||
249 | /* setup VA is PA, at least up to PAGE_OFFSET */ | ||
250 | |||
251 | pgtable = (HV_PTE *)current->mm->pgd; | ||
252 | pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE); | ||
253 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); | ||
254 | |||
255 | for (i = 0; i < pgd_index(PAGE_OFFSET); i++) | ||
256 | pgtable[i] = pfn_pte(i << (HPAGE_SHIFT - PAGE_SHIFT), pte); | ||
257 | } | ||
258 | |||
259 | |||
260 | NORET_TYPE void machine_kexec(struct kimage *image) | ||
261 | { | ||
262 | void *reboot_code_buffer; | ||
263 | NORET_TYPE void (*rnk)(unsigned long, void *, unsigned long) | ||
264 | ATTRIB_NORET; | ||
265 | |||
266 | /* Mask all interrupts before starting to reboot. */ | ||
267 | interrupt_mask_set_mask(~0ULL); | ||
268 | |||
269 | kexec_find_and_set_command_line(image); | ||
270 | |||
271 | /* | ||
272 | * Adjust the home caching of the control page to be cached on | ||
273 | * this cpu, and copy the assembly helper into the control | ||
274 | * code page, which we map in the vmalloc area. | ||
275 | */ | ||
276 | homecache_change_page_home(image->control_code_page, 0, | ||
277 | smp_processor_id()); | ||
278 | reboot_code_buffer = vmap(&image->control_code_page, 1, 0, | ||
279 | __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE)); | ||
280 | memcpy(reboot_code_buffer, relocate_new_kernel, | ||
281 | relocate_new_kernel_size); | ||
282 | __flush_icache_range( | ||
283 | (unsigned long) reboot_code_buffer, | ||
284 | (unsigned long) reboot_code_buffer + relocate_new_kernel_size); | ||
285 | |||
286 | setup_quasi_va_is_pa(); | ||
287 | |||
288 | /* now call it */ | ||
289 | rnk = reboot_code_buffer; | ||
290 | (*rnk)(image->head, reboot_code_buffer, image->start); | ||
291 | } | ||
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c new file mode 100644 index 000000000000..f991f5285d8a --- /dev/null +++ b/arch/tile/kernel/messaging.c | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/percpu.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/hardirq.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <asm/hv_driver.h> | ||
20 | #include <asm/irq_regs.h> | ||
21 | #include <hv/hypervisor.h> | ||
22 | #include <arch/interrupts.h> | ||
23 | |||
24 | /* All messages are stored here */ | ||
25 | static DEFINE_PER_CPU(HV_MsgState, msg_state); | ||
26 | |||
27 | void __cpuinit init_messaging(void) | ||
28 | { | ||
29 | /* Allocate storage for messages in kernel space */ | ||
30 | HV_MsgState *state = &__get_cpu_var(msg_state); | ||
31 | int rc = hv_register_message_state(state); | ||
32 | if (rc != HV_OK) | ||
33 | panic("hv_register_message_state: error %d", rc); | ||
34 | |||
35 | /* Make sure downcall interrupts will be enabled. */ | ||
36 | raw_local_irq_unmask(INT_INTCTRL_1); | ||
37 | } | ||
38 | |||
39 | void hv_message_intr(struct pt_regs *regs, int intnum) | ||
40 | { | ||
41 | /* | ||
42 | * We enter with interrupts disabled and leave them disabled, | ||
43 | * to match expectations of called functions (e.g. | ||
44 | * do_ccupdate_local() in mm/slab.c). This is also consistent | ||
45 | * with normal call entry for device interrupts. | ||
46 | */ | ||
47 | |||
48 | int message[HV_MAX_MESSAGE_SIZE/sizeof(int)]; | ||
49 | HV_RcvMsgInfo rmi; | ||
50 | int nmsgs = 0; | ||
51 | |||
52 | /* Track time spent here in an interrupt context */ | ||
53 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
54 | irq_enter(); | ||
55 | |||
56 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
57 | /* Debugging check for stack overflow: less than 1/8th stack free? */ | ||
58 | { | ||
59 | long sp = stack_pointer - (long) current_thread_info(); | ||
60 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { | ||
61 | printk(KERN_EMERG "hv_message_intr: " | ||
62 | "stack overflow: %ld\n", | ||
63 | sp - sizeof(struct thread_info)); | ||
64 | dump_stack(); | ||
65 | } | ||
66 | } | ||
67 | #endif | ||
68 | |||
69 | while (1) { | ||
70 | rmi = hv_receive_message(__get_cpu_var(msg_state), | ||
71 | (HV_VirtAddr) message, | ||
72 | sizeof(message)); | ||
73 | if (rmi.msglen == 0) | ||
74 | break; | ||
75 | |||
76 | if (rmi.msglen < 0) | ||
77 | panic("hv_receive_message failed: %d", rmi.msglen); | ||
78 | |||
79 | ++nmsgs; | ||
80 | |||
81 | if (rmi.source == HV_MSG_TILE) { | ||
82 | int tag; | ||
83 | |||
84 | /* we just send tags for now */ | ||
85 | BUG_ON(rmi.msglen != sizeof(int)); | ||
86 | |||
87 | tag = message[0]; | ||
88 | #ifdef CONFIG_SMP | ||
89 | evaluate_message(message[0]); | ||
90 | #else | ||
91 | panic("Received IPI message %d in UP mode", tag); | ||
92 | #endif | ||
93 | } else if (rmi.source == HV_MSG_INTR) { | ||
94 | HV_IntrMsg *him = (HV_IntrMsg *)message; | ||
95 | struct hv_driver_cb *cb = | ||
96 | (struct hv_driver_cb *)him->intarg; | ||
97 | cb->callback(cb, him->intdata); | ||
98 | __get_cpu_var(irq_stat).irq_hv_msg_count++; | ||
99 | } | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * We shouldn't have gotten a message downcall with no | ||
104 | * messages available. | ||
105 | */ | ||
106 | if (nmsgs == 0) | ||
107 | panic("Message downcall invoked with no messages!"); | ||
108 | |||
109 | /* | ||
110 | * Track time spent against the current process again and | ||
111 | * process any softirqs if they are waiting. | ||
112 | */ | ||
113 | irq_exit(); | ||
114 | set_irq_regs(old_regs); | ||
115 | } | ||
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c new file mode 100644 index 000000000000..ed3e91161f88 --- /dev/null +++ b/arch/tile/kernel/module.c | |||
@@ -0,0 +1,257 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Based on i386 version, copyright (C) 2001 Rusty Russell. | ||
15 | */ | ||
16 | |||
17 | #include <linux/moduleloader.h> | ||
18 | #include <linux/elf.h> | ||
19 | #include <linux/vmalloc.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <asm/opcode-tile.h> | ||
24 | #include <asm/pgtable.h> | ||
25 | |||
26 | #ifdef __tilegx__ | ||
27 | # define Elf_Rela Elf64_Rela | ||
28 | # define ELF_R_SYM ELF64_R_SYM | ||
29 | # define ELF_R_TYPE ELF64_R_TYPE | ||
30 | #else | ||
31 | # define Elf_Rela Elf32_Rela | ||
32 | # define ELF_R_SYM ELF32_R_SYM | ||
33 | # define ELF_R_TYPE ELF32_R_TYPE | ||
34 | #endif | ||
35 | |||
36 | #ifdef MODULE_DEBUG | ||
37 | #define DEBUGP printk | ||
38 | #else | ||
39 | #define DEBUGP(fmt...) | ||
40 | #endif | ||
41 | |||
42 | /* | ||
43 | * Allocate some address space in the range MEM_MODULE_START to | ||
44 | * MEM_MODULE_END and populate it with memory. | ||
45 | */ | ||
46 | void *module_alloc(unsigned long size) | ||
47 | { | ||
48 | struct page **pages; | ||
49 | pgprot_t prot_rwx = __pgprot(_PAGE_KERNEL | _PAGE_KERNEL_EXEC); | ||
50 | struct vm_struct *area; | ||
51 | int i = 0; | ||
52 | int npages; | ||
53 | |||
54 | if (size == 0) | ||
55 | return NULL; | ||
56 | npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; | ||
57 | pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL); | ||
58 | if (pages == NULL) | ||
59 | return NULL; | ||
60 | for (; i < npages; ++i) { | ||
61 | pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | ||
62 | if (!pages[i]) | ||
63 | goto error; | ||
64 | } | ||
65 | |||
66 | area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END); | ||
67 | if (!area) | ||
68 | goto error; | ||
69 | |||
70 | if (map_vm_area(area, prot_rwx, &pages)) { | ||
71 | vunmap(area->addr); | ||
72 | goto error; | ||
73 | } | ||
74 | |||
75 | return area->addr; | ||
76 | |||
77 | error: | ||
78 | while (--i >= 0) | ||
79 | __free_page(pages[i]); | ||
80 | kfree(pages); | ||
81 | return NULL; | ||
82 | } | ||
83 | |||
84 | |||
85 | /* Free memory returned from module_alloc */ | ||
86 | void module_free(struct module *mod, void *module_region) | ||
87 | { | ||
88 | vfree(module_region); | ||
89 | /* | ||
90 | * FIXME: If module_region == mod->init_region, trim exception | ||
91 | * table entries. | ||
92 | */ | ||
93 | } | ||
94 | |||
95 | /* We don't need anything special. */ | ||
96 | int module_frob_arch_sections(Elf_Ehdr *hdr, | ||
97 | Elf_Shdr *sechdrs, | ||
98 | char *secstrings, | ||
99 | struct module *mod) | ||
100 | { | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | int apply_relocate(Elf_Shdr *sechdrs, | ||
105 | const char *strtab, | ||
106 | unsigned int symindex, | ||
107 | unsigned int relsec, | ||
108 | struct module *me) | ||
109 | { | ||
110 | printk(KERN_ERR "module %s: .rel relocation unsupported\n", me->name); | ||
111 | return -ENOEXEC; | ||
112 | } | ||
113 | |||
114 | #ifdef __tilegx__ | ||
115 | /* | ||
116 | * Validate that the high 16 bits of "value" are just the sign-extension of | ||
117 | * the low 48 bits. | ||
118 | */ | ||
119 | static int validate_hw2_last(long value, struct module *me) | ||
120 | { | ||
121 | if (((value << 16) >> 16) != value) { | ||
122 | printk("module %s: Out of range HW2_LAST value %#lx\n", | ||
123 | me->name, value); | ||
124 | return 0; | ||
125 | } | ||
126 | return 1; | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Validate that "value" isn't too big to hold in a JumpOff relocation. | ||
131 | */ | ||
132 | static int validate_jumpoff(long value) | ||
133 | { | ||
134 | /* Determine size of jump offset. */ | ||
135 | int shift = __builtin_clzl(get_JumpOff_X1(create_JumpOff_X1(-1))); | ||
136 | |||
137 | /* Check to see if it fits into the relocation slot. */ | ||
138 | long f = get_JumpOff_X1(create_JumpOff_X1(value)); | ||
139 | f = (f << shift) >> shift; | ||
140 | |||
141 | return f == value; | ||
142 | } | ||
143 | #endif | ||
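
validate_jumpoff() works by round-tripping the offset through the JumpOff field: the clz computes how many bits the field holds, and the shift pair re-sign-extends the extracted field so the compare catches any value that does not fit. A worked example, assuming a hypothetical 27-bit field and a 64-bit long (so shift = 37):

    /* value = 0x04000000 needs 28 bits as a positive number:
     *   f = get_JumpOff_X1(create_JumpOff_X1(value)) = 0x04000000  (low 27 bits)
     *   (f << 37) >> 37 = 0xfffffffffc000000          (bit 26 acts as the sign bit)
     * which != value, so the relocation is rejected as out of range.
     */
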
144 | |||
145 | int apply_relocate_add(Elf_Shdr *sechdrs, | ||
146 | const char *strtab, | ||
147 | unsigned int symindex, | ||
148 | unsigned int relsec, | ||
149 | struct module *me) | ||
150 | { | ||
151 | unsigned int i; | ||
152 | Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr; | ||
153 | Elf_Sym *sym; | ||
154 | u64 *location; | ||
155 | unsigned long value; | ||
156 | |||
157 | DEBUGP("Applying relocate section %u to %u\n", relsec, | ||
158 | sechdrs[relsec].sh_info); | ||
159 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
160 | /* This is where to make the change */ | ||
161 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
162 | + rel[i].r_offset; | ||
163 | /* | ||
164 | * This is the symbol it is referring to. | ||
165 | * Note that all undefined symbols have been resolved. | ||
166 | */ | ||
167 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr | ||
168 | + ELF_R_SYM(rel[i].r_info); | ||
169 | value = sym->st_value + rel[i].r_addend; | ||
170 | |||
171 | switch (ELF_R_TYPE(rel[i].r_info)) { | ||
172 | |||
173 | #define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value))) | ||
174 | |||
175 | #ifndef __tilegx__ | ||
176 | case R_TILE_32: | ||
177 | *(uint32_t *)location = value; | ||
178 | break; | ||
179 | case R_TILE_IMM16_X0_HA: | ||
180 | value = (value + 0x8000) >> 16; | ||
181 | /*FALLTHROUGH*/ | ||
182 | case R_TILE_IMM16_X0_LO: | ||
183 | MUNGE(create_Imm16_X0); | ||
184 | break; | ||
185 | case R_TILE_IMM16_X1_HA: | ||
186 | value = (value + 0x8000) >> 16; | ||
187 | /*FALLTHROUGH*/ | ||
188 | case R_TILE_IMM16_X1_LO: | ||
189 | MUNGE(create_Imm16_X1); | ||
190 | break; | ||
191 | case R_TILE_JOFFLONG_X1: | ||
192 | value -= (unsigned long) location; /* pc-relative */ | ||
193 | value = (long) value >> 3; /* count by instrs */ | ||
194 | MUNGE(create_JOffLong_X1); | ||
195 | break; | ||
196 | #else | ||
197 | case R_TILEGX_64: | ||
198 | *location = value; | ||
199 | break; | ||
200 | case R_TILEGX_IMM16_X0_HW2_LAST: | ||
201 | if (!validate_hw2_last(value, me)) | ||
202 | return -ENOEXEC; | ||
203 | value >>= 16; | ||
204 | /*FALLTHROUGH*/ | ||
205 | case R_TILEGX_IMM16_X0_HW1: | ||
206 | value >>= 16; | ||
207 | /*FALLTHROUGH*/ | ||
208 | case R_TILEGX_IMM16_X0_HW0: | ||
209 | MUNGE(create_Imm16_X0); | ||
210 | break; | ||
211 | case R_TILEGX_IMM16_X1_HW2_LAST: | ||
212 | if (!validate_hw2_last(value, me)) | ||
213 | return -ENOEXEC; | ||
214 | value >>= 16; | ||
215 | /*FALLTHROUGH*/ | ||
216 | case R_TILEGX_IMM16_X1_HW1: | ||
217 | value >>= 16; | ||
218 | /*FALLTHROUGH*/ | ||
219 | case R_TILEGX_IMM16_X1_HW0: | ||
220 | MUNGE(create_Imm16_X1); | ||
221 | break; | ||
222 | case R_TILEGX_JUMPOFF_X1: | ||
223 | value -= (unsigned long) location; /* pc-relative */ | ||
224 | value = (long) value >> 3; /* count by instrs */ | ||
225 | if (!validate_jumpoff(value)) { | ||
226 | printk("module %s: Out of range jump to" | ||
227 | " %#llx at %#llx (%p)\n", me->name, | ||
228 | sym->st_value + rel[i].r_addend, | ||
229 | rel[i].r_offset, location); | ||
230 | return -ENOEXEC; | ||
231 | } | ||
232 | MUNGE(create_JumpOff_X1); | ||
233 | break; | ||
234 | #endif | ||
235 | |||
236 | #undef MUNGE | ||
237 | |||
238 | default: | ||
239 | printk(KERN_ERR "module %s: Unknown relocation: %d\n", | ||
240 | me->name, (int) ELF_R_TYPE(rel[i].r_info)); | ||
241 | return -ENOEXEC; | ||
242 | } | ||
243 | } | ||
244 | return 0; | ||
245 | } | ||
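
The HA/LO pairs above split a 32-bit value across two 16-bit immediates; the +0x8000 rounding in the HA case compensates for the low half being treated as signed when the two are combined. A quick worked example:

    /* value = 0x12348765
     *   LO = value & 0xffff            = 0x8765   (sign-extends to -0x789b)
     *   HA = (value + 0x8000) >> 16    = 0x1235
     *   (HA << 16) + (short)LO         = 0x12350000 - 0x789b = 0x12348765
     */
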
246 | |||
247 | int module_finalize(const Elf_Ehdr *hdr, | ||
248 | const Elf_Shdr *sechdrs, | ||
249 | struct module *me) | ||
250 | { | ||
251 | /* FIXME: perhaps remove the "writable" bit from the TLB? */ | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | void module_arch_cleanup(struct module *mod) | ||
256 | { | ||
257 | } | ||
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c new file mode 100644 index 000000000000..1d456404f065 --- /dev/null +++ b/arch/tile/kernel/pci-dma.c | |||
@@ -0,0 +1,252 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <linux/dma-mapping.h> | ||
17 | #include <linux/vmalloc.h> | ||
18 | #include <asm/tlbflush.h> | ||
19 | #include <asm/homecache.h> | ||
20 | |||
21 | /* Generic DMA mapping functions: */ | ||
22 | |||
23 | /* | ||
24 | * Allocate what Linux calls "coherent" memory, which for us just | ||
25 | * means uncached. | ||
26 | */ | ||
27 | void *dma_alloc_coherent(struct device *dev, | ||
28 | size_t size, | ||
29 | dma_addr_t *dma_handle, | ||
30 | gfp_t gfp) | ||
31 | { | ||
32 | u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32); | ||
33 | int node = dev_to_node(dev); | ||
34 | int order = get_order(size); | ||
35 | struct page *pg; | ||
36 | dma_addr_t addr; | ||
37 | |||
38 | /* Set GFP_KERNEL to ensure we have memory with a kernel VA. */ | ||
39 | gfp |= GFP_KERNEL | __GFP_ZERO; | ||
40 | |||
41 | /* | ||
42 | * By forcing NUMA node 0 for 32-bit masks we ensure that the | ||
43 | * high 32 bits of the resulting PA will be zero. If the mask | ||
44 | * size is, e.g., 24, we may still not be able to guarantee a | ||
45 | * suitable memory address, in which case we will return NULL. | ||
46 | * But such devices are uncommon. | ||
47 | */ | ||
48 | if (dma_mask <= DMA_BIT_MASK(32)) | ||
49 | node = 0; | ||
50 | |||
51 | pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED); | ||
52 | if (pg == NULL) | ||
53 | return NULL; | ||
54 | |||
55 | addr = page_to_phys(pg); | ||
56 | if (addr + size > dma_mask) { | ||
57 | homecache_free_pages(addr, order); | ||
58 | return NULL; | ||
59 | } | ||
60 | |||
61 | *dma_handle = addr; | ||
62 | return page_address(pg); | ||
63 | } | ||
64 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
65 | |||
66 | /* | ||
67 | * Free memory that was allocated with dma_alloc_coherent. | ||
68 | */ | ||
69 | void dma_free_coherent(struct device *dev, size_t size, | ||
70 | void *vaddr, dma_addr_t dma_handle) | ||
71 | { | ||
72 | homecache_free_pages((unsigned long)vaddr, get_order(size)); | ||
73 | } | ||
74 | EXPORT_SYMBOL(dma_free_coherent); | ||
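
Typical driver-side use of this pair, sketched with an invented ring size and helper name for illustration:

    /* Illustrative only: allocate an uncached descriptor ring for a device. */
    static int example_setup_ring(struct device *dev)
    {
            dma_addr_t ring_dma;
            void *ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);

            if (ring == NULL)
                    return -ENOMEM;
            /* ... program 'ring_dma' into the device; use 'ring' from the CPU ... */
            dma_free_coherent(dev, 4096, ring, ring_dma);
            return 0;
    }
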
75 | |||
76 | /* | ||
77 | * The map routines "map" the specified address range for DMA | ||
78 | * accesses. The memory belongs to the device after this call is | ||
79 | * issued, until it is unmapped with dma_unmap_single. | ||
80 | * | ||
81 | * We don't need to do any mapping, we just flush the address range | ||
82 | * out of the cache and return a DMA address. | ||
83 | * | ||
84 | * The unmap routines do whatever is necessary before the processor | ||
85 | * accesses the memory again, and must be called before the driver | ||
86 | * touches the memory. We can get away with a cache invalidate if we | ||
87 | * can count on nothing having been touched. | ||
88 | */ | ||
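
In driver terms, the streaming API described above follows the usual Linux pattern; a sketch, with the buffer, length, and helper name illustrative:

    /* Illustrative only: map a kernel buffer for a device-bound transfer. */
    static void example_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            /* ... tell the device to read 'len' bytes starting at 'busaddr' ... */
            dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
    }
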
89 | |||
90 | |||
91 | /* | ||
92 | * dma_map_single can be passed any memory address, and there appear | ||
93 | * to be no alignment constraints. | ||
94 | * | ||
95 | * There is a chance that the start of the buffer will share a cache | ||
96 | * line with some other data that has been touched in the meantime. | ||
97 | */ | ||
98 | dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||
99 | enum dma_data_direction direction) | ||
100 | { | ||
101 | struct page *page; | ||
102 | dma_addr_t dma_addr; | ||
103 | int thispage; | ||
104 | |||
105 | BUG_ON(!valid_dma_direction(direction)); | ||
106 | WARN_ON(size == 0); | ||
107 | |||
108 | dma_addr = __pa(ptr); | ||
109 | |||
110 | /* We might have been handed a buffer that spans a page boundary */ | ||
111 | while ((int)size > 0) { | ||
112 | /* The amount to flush that's on this page */ | ||
113 | thispage = PAGE_SIZE - ((unsigned long)ptr & (PAGE_SIZE - 1)); | ||
114 | thispage = min((int)thispage, (int)size); | ||
115 | /* Is this valid for any page we could be handed? */ | ||
116 | page = pfn_to_page(kaddr_to_pfn(ptr)); | ||
117 | homecache_flush_cache(page, 0); | ||
118 | ptr += thispage; | ||
119 | size -= thispage; | ||
120 | } | ||
121 | |||
122 | return dma_addr; | ||
123 | } | ||
124 | EXPORT_SYMBOL(dma_map_single); | ||
125 | |||
126 | void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
127 | enum dma_data_direction direction) | ||
128 | { | ||
129 | BUG_ON(!valid_dma_direction(direction)); | ||
130 | } | ||
131 | EXPORT_SYMBOL(dma_unmap_single); | ||
132 | |||
133 | int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, | ||
134 | enum dma_data_direction direction) | ||
135 | { | ||
136 | struct scatterlist *sg; | ||
137 | int i; | ||
138 | |||
139 | BUG_ON(!valid_dma_direction(direction)); | ||
140 | |||
141 | WARN_ON(nents == 0 || sglist->length == 0); | ||
142 | |||
143 | for_each_sg(sglist, sg, nents, i) { | ||
144 | struct page *page; | ||
145 | sg->dma_address = sg_phys(sg); | ||
146 | page = pfn_to_page(sg->dma_address >> PAGE_SHIFT); | ||
147 | homecache_flush_cache(page, 0); | ||
148 | } | ||
149 | |||
150 | return nents; | ||
151 | } | ||
152 | EXPORT_SYMBOL(dma_map_sg); | ||
153 | |||
154 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, | ||
155 | enum dma_data_direction direction) | ||
156 | { | ||
157 | BUG_ON(!valid_dma_direction(direction)); | ||
158 | } | ||
159 | EXPORT_SYMBOL(dma_unmap_sg); | ||
160 | |||
161 | dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
162 | unsigned long offset, size_t size, | ||
163 | enum dma_data_direction direction) | ||
164 | { | ||
165 | BUG_ON(!valid_dma_direction(direction)); | ||
166 | |||
167 | homecache_flush_cache(page, 0); | ||
168 | |||
169 | return page_to_pa(page) + offset; | ||
170 | } | ||
171 | EXPORT_SYMBOL(dma_map_page); | ||
172 | |||
173 | void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, | ||
174 | enum dma_data_direction direction) | ||
175 | { | ||
176 | BUG_ON(!valid_dma_direction(direction)); | ||
177 | } | ||
178 | EXPORT_SYMBOL(dma_unmap_page); | ||
179 | |||
180 | void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
181 | size_t size, enum dma_data_direction direction) | ||
182 | { | ||
183 | BUG_ON(!valid_dma_direction(direction)); | ||
184 | } | ||
185 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | ||
186 | |||
187 | void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, | ||
188 | size_t size, enum dma_data_direction direction) | ||
189 | { | ||
190 | unsigned long start = PFN_DOWN(dma_handle); | ||
191 | unsigned long end = PFN_DOWN(dma_handle + size - 1); | ||
192 | unsigned long i; | ||
193 | |||
194 | BUG_ON(!valid_dma_direction(direction)); | ||
195 | for (i = start; i <= end; ++i) | ||
196 | homecache_flush_cache(pfn_to_page(i), 0); | ||
197 | } | ||
198 | EXPORT_SYMBOL(dma_sync_single_for_device); | ||
199 | |||
200 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, | ||
201 | enum dma_data_direction direction) | ||
202 | { | ||
203 | BUG_ON(!valid_dma_direction(direction)); | ||
204 | WARN_ON(nelems == 0 || sg[0].length == 0); | ||
205 | } | ||
206 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | ||
207 | |||
208 | /* | ||
209 | * Flush and invalidate cache for scatterlist. | ||
210 | */ | ||
211 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, | ||
212 | int nelems, enum dma_data_direction direction) | ||
213 | { | ||
214 | struct scatterlist *sg; | ||
215 | int i; | ||
216 | |||
217 | BUG_ON(!valid_dma_direction(direction)); | ||
218 | WARN_ON(nelems == 0 || sglist->length == 0); | ||
219 | |||
220 | for_each_sg(sglist, sg, nelems, i) { | ||
221 | dma_sync_single_for_device(dev, sg->dma_address, | ||
222 | sg_dma_len(sg), direction); | ||
223 | } | ||
224 | } | ||
225 | EXPORT_SYMBOL(dma_sync_sg_for_device); | ||
226 | |||
227 | void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, | ||
228 | unsigned long offset, size_t size, | ||
229 | enum dma_data_direction direction) | ||
230 | { | ||
231 | dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction); | ||
232 | } | ||
233 | EXPORT_SYMBOL(dma_sync_single_range_for_cpu); | ||
234 | |||
235 | void dma_sync_single_range_for_device(struct device *dev, | ||
236 | dma_addr_t dma_handle, | ||
237 | unsigned long offset, size_t size, | ||
238 | enum dma_data_direction direction) | ||
239 | { | ||
240 | dma_sync_single_for_device(dev, dma_handle + offset, size, direction); | ||
241 | } | ||
242 | EXPORT_SYMBOL(dma_sync_single_range_for_device); | ||
243 | |||
244 | /* | ||
245 | * dma_alloc_noncoherent() returns non-cacheable memory, so there's no | ||
246 | * need to do any flushing here. | ||
247 | */ | ||
248 | void dma_cache_sync(void *vaddr, size_t size, | ||
249 | enum dma_data_direction direction) | ||
250 | { | ||
251 | } | ||
252 | EXPORT_SYMBOL(dma_cache_sync); | ||
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c new file mode 100644 index 000000000000..92ef925d2f8d --- /dev/null +++ b/arch/tile/kernel/proc.c | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/smp.h> | ||
16 | #include <linux/seq_file.h> | ||
17 | #include <linux/threads.h> | ||
18 | #include <linux/cpumask.h> | ||
19 | #include <linux/timex.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/proc_fs.h> | ||
23 | #include <linux/sysctl.h> | ||
24 | #include <linux/hardirq.h> | ||
25 | #include <linux/mman.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/sections.h> | ||
30 | #include <asm/homecache.h> | ||
31 | #include <arch/chip.h> | ||
32 | |||
33 | |||
34 | /* | ||
35 | * Support /proc/cpuinfo | ||
36 | */ | ||
37 | |||
38 | #define cpu_to_ptr(n) ((void *)((long)(n)+1)) | ||
39 | #define ptr_to_cpu(p) ((long)(p) - 1) | ||
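
The one-based bias matters because the seq_file iterator treats a NULL return from c_start() as end-of-sequence, so CPU 0 must not encode to a NULL pointer:

    /* cpu_to_ptr(0) == (void *)1, not NULL, so iteration still starts at
     * CPU 0; ptr_to_cpu() undoes the bias in show_cpuinfo(). */
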
40 | |||
41 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
42 | { | ||
43 | int n = ptr_to_cpu(v); | ||
44 | |||
45 | if (n == 0) { | ||
46 | char buf[NR_CPUS*5]; | ||
47 | cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask); | ||
48 | seq_printf(m, "cpu count\t: %d\n", num_online_cpus()); | ||
49 | seq_printf(m, "cpu list\t: %s\n", buf); | ||
50 | seq_printf(m, "model name\t: %s\n", chip_model); | ||
51 | seq_printf(m, "flags\t\t:\n"); /* nothing for now */ | ||
52 | seq_printf(m, "cpu MHz\t\t: %llu.%06llu\n", | ||
53 | get_clock_rate() / 1000000, | ||
54 | (get_clock_rate() % 1000000)); | ||
55 | seq_printf(m, "bogomips\t: %lu.%02lu\n\n", | ||
56 | loops_per_jiffy/(500000/HZ), | ||
57 | (loops_per_jiffy/(5000/HZ)) % 100); | ||
58 | } | ||
59 | |||
60 | #ifdef CONFIG_SMP | ||
61 | if (!cpu_online(n)) | ||
62 | return 0; | ||
63 | #endif | ||
64 | |||
65 | seq_printf(m, "processor\t: %d\n", n); | ||
66 | |||
67 | /* Print only num_online_cpus() blank lines total. */ | ||
68 | if (cpumask_next(n, cpu_online_mask) < nr_cpu_ids) | ||
69 | seq_printf(m, "\n"); | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
75 | { | ||
76 | return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL; | ||
77 | } | ||
78 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
79 | { | ||
80 | ++*pos; | ||
81 | return c_start(m, pos); | ||
82 | } | ||
83 | static void c_stop(struct seq_file *m, void *v) | ||
84 | { | ||
85 | } | ||
86 | const struct seq_operations cpuinfo_op = { | ||
87 | .start = c_start, | ||
88 | .next = c_next, | ||
89 | .stop = c_stop, | ||
90 | .show = show_cpuinfo, | ||
91 | }; | ||
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c new file mode 100644 index 000000000000..824f230e6d1a --- /dev/null +++ b/arch/tile/kernel/process.c | |||
@@ -0,0 +1,647 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/preempt.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/kprobes.h> | ||
20 | #include <linux/elfcore.h> | ||
21 | #include <linux/tick.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/compat.h> | ||
25 | #include <linux/hardirq.h> | ||
26 | #include <linux/syscalls.h> | ||
27 | #include <asm/system.h> | ||
28 | #include <asm/stack.h> | ||
29 | #include <asm/homecache.h> | ||
30 | #include <arch/chip.h> | ||
31 | #include <arch/abi.h> | ||
32 | |||
33 | |||
34 | /* | ||
35 | * Support the (x86-style) "idle=poll" option: prefer low latency when | ||
36 | * leaving the idle loop over low power while in it, e.g. if we have | ||
37 | * one thread per core and we want to get threads out of futex waits fast. | ||
38 | */ | ||
39 | static int no_idle_nap; | ||
40 | static int __init idle_setup(char *str) | ||
41 | { | ||
42 | if (!str) | ||
43 | return -EINVAL; | ||
44 | |||
45 | if (!strcmp(str, "poll")) { | ||
46 | printk("using polling idle threads.\n"); | ||
47 | no_idle_nap = 1; | ||
48 | } else if (!strcmp(str, "halt")) | ||
49 | no_idle_nap = 0; | ||
50 | else | ||
51 | return -1; | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | early_param("idle", idle_setup); | ||
56 | |||
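For reference, idle_setup() above consumes the option from the kernel command line at early boot, so selecting the polling idle loop is just a matter of appending it to the boot arguments, for example (the other arguments here are placeholders):

	root=/dev/sda1 console=hvc0 idle=poll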
57 | /* | ||
58 | * The idle thread. There's no useful work to be | ||
59 | * done, so just try to conserve power and have a | ||
60 | * low exit latency (i.e. sit in a loop waiting for | ||
61 | * somebody to say that they'd like to reschedule). | ||
62 | */ | ||
63 | void cpu_idle(void) | ||
64 | { | ||
65 | extern void _cpu_idle(void); | ||
66 | int cpu = smp_processor_id(); | ||
67 | |||
68 | |||
69 | current_thread_info()->status |= TS_POLLING; | ||
70 | |||
71 | if (no_idle_nap) { | ||
72 | while (1) { | ||
73 | while (!need_resched()) | ||
74 | cpu_relax(); | ||
75 | schedule(); | ||
76 | } | ||
77 | } | ||
78 | |||
79 | /* endless idle loop with no priority at all */ | ||
80 | while (1) { | ||
81 | tick_nohz_stop_sched_tick(1); | ||
82 | while (!need_resched()) { | ||
83 | if (cpu_is_offline(cpu)) | ||
84 | BUG(); /* no HOTPLUG_CPU */ | ||
85 | |||
86 | local_irq_disable(); | ||
87 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; | ||
88 | current_thread_info()->status &= ~TS_POLLING; | ||
89 | /* | ||
90 | * TS_POLLING-cleared state must be visible before we | ||
91 | * test NEED_RESCHED: | ||
92 | */ | ||
93 | smp_mb(); | ||
94 | |||
95 | if (!need_resched()) | ||
96 | _cpu_idle(); | ||
97 | else | ||
98 | local_irq_enable(); | ||
99 | current_thread_info()->status |= TS_POLLING; | ||
100 | } | ||
101 | tick_nohz_restart_sched_tick(); | ||
102 | preempt_enable_no_resched(); | ||
103 | schedule(); | ||
104 | preempt_disable(); | ||
105 | } | ||
106 | } | ||
107 | |||
108 | struct thread_info *alloc_thread_info(struct task_struct *task) | ||
109 | { | ||
110 | struct page *page; | ||
111 | int flags = GFP_KERNEL; | ||
112 | |||
113 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
114 | flags |= __GFP_ZERO; | ||
115 | #endif | ||
116 | |||
117 | page = alloc_pages(flags, THREAD_SIZE_ORDER); | ||
118 | if (!page) | ||
119 | return NULL; | ||
120 | |||
121 | return (struct thread_info *)page_address(page); | ||
122 | } | ||
123 | |||
124 | /* | ||
125 | * Free a thread_info node, and all of its derivative | ||
126 | * data structures. | ||
127 | */ | ||
128 | void free_thread_info(struct thread_info *info) | ||
129 | { | ||
130 | struct single_step_state *step_state = info->step_state; | ||
131 | |||
132 | |||
133 | if (step_state) { | ||
134 | |||
135 | /* | ||
136 | * FIXME: we don't munmap step_state->buffer | ||
137 | * because the mm_struct for this process (info->task->mm) | ||
138 | * has already been zeroed in exit_mm(). Keeping a | ||
139 | * reference to it here seems like a bad move, so this | ||
140 | * means we can't munmap() the buffer, and therefore if we | ||
141 | * ptrace multiple threads in a process, we will slowly | ||
142 | * leak user memory. (Note that as soon as the last | ||
143 | * thread in a process dies, we will reclaim all user | ||
144 | * memory including single-step buffers in the usual way.) | ||
145 | * We should either assign a kernel VA to this buffer | ||
146 | * somehow, or we should associate the buffer(s) with the | ||
147 | * mm itself so we can clean them up that way. | ||
148 | */ | ||
149 | kfree(step_state); | ||
150 | } | ||
151 | |||
152 | free_page((unsigned long)info); | ||
153 | } | ||
154 | |||
155 | static void save_arch_state(struct thread_struct *t); | ||
156 | |||
157 | extern void ret_from_fork(void); | ||
158 | |||
159 | int copy_thread(unsigned long clone_flags, unsigned long sp, | ||
160 | unsigned long stack_size, | ||
161 | struct task_struct *p, struct pt_regs *regs) | ||
162 | { | ||
163 | struct pt_regs *childregs; | ||
164 | unsigned long ksp; | ||
165 | |||
166 | /* | ||
167 | * When creating a new kernel thread we pass sp as zero. | ||
168 | * Assign it to a reasonable value now that we have the stack. | ||
169 | */ | ||
170 | if (sp == 0 && regs->ex1 == PL_ICS_EX1(KERNEL_PL, 0)) | ||
171 | sp = KSTK_TOP(p); | ||
172 | |||
173 | /* | ||
174 | * Do not clone step state from the parent; each thread | ||
175 | * must make its own lazily. | ||
176 | */ | ||
177 | task_thread_info(p)->step_state = NULL; | ||
178 | |||
179 | /* | ||
180 | * Start new thread in ret_from_fork so it schedules properly | ||
181 | * and then return from interrupt like the parent. | ||
182 | */ | ||
183 | p->thread.pc = (unsigned long) ret_from_fork; | ||
184 | |||
185 | /* Save user stack top pointer so we can ID the stack vm area later. */ | ||
186 | p->thread.usp0 = sp; | ||
187 | |||
188 | /* Record the pid of the process that created this one. */ | ||
189 | p->thread.creator_pid = current->pid; | ||
190 | |||
191 | /* | ||
192 | * Copy the registers onto the kernel stack so the | ||
193 | * return-from-interrupt code will reload it into registers. | ||
194 | */ | ||
195 | childregs = task_pt_regs(p); | ||
196 | *childregs = *regs; | ||
197 | childregs->regs[0] = 0; /* return value is zero */ | ||
198 | childregs->sp = sp; /* override with new user stack pointer */ | ||
199 | |||
200 | /* | ||
201 | * Copy the callee-saved registers from the passed pt_regs struct | ||
202 | * into the context-switch callee-saved registers area. | ||
203 | * We have to restore the callee-saved registers since we may | ||
204 | * be cloning a userspace task with userspace register state, | ||
205 | * and we won't be unwinding the same kernel frames to restore them. | ||
206 | * Zero out the C ABI save area to mark the top of the stack. | ||
207 | */ | ||
208 | ksp = (unsigned long) childregs; | ||
209 | ksp -= C_ABI_SAVE_AREA_SIZE; /* interrupt-entry save area */ | ||
210 | ((long *)ksp)[0] = ((long *)ksp)[1] = 0; | ||
211 | ksp -= CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long); | ||
212 | memcpy((void *)ksp, ®s->regs[CALLEE_SAVED_FIRST_REG], | ||
213 | CALLEE_SAVED_REGS_COUNT * sizeof(unsigned long)); | ||
214 | ksp -= C_ABI_SAVE_AREA_SIZE; /* __switch_to() save area */ | ||
215 | ((long *)ksp)[0] = ((long *)ksp)[1] = 0; | ||
216 | p->thread.ksp = ksp; | ||
217 | |||
218 | #if CHIP_HAS_TILE_DMA() | ||
219 | /* | ||
220 | * No DMA in the new thread. We model this on the fact that | ||
221 | * fork() clears the pending signals, alarms, and aio for the child. | ||
222 | */ | ||
223 | memset(&p->thread.tile_dma_state, 0, sizeof(struct tile_dma_state)); | ||
224 | memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb)); | ||
225 | #endif | ||
226 | |||
227 | #if CHIP_HAS_SN_PROC() | ||
228 | /* Likewise, the new thread is not running static processor code. */ | ||
229 | p->thread.sn_proc_running = 0; | ||
230 | memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb)); | ||
231 | #endif | ||
232 | |||
233 | #if CHIP_HAS_PROC_STATUS_SPR() | ||
234 | /* New thread has its miscellaneous processor state bits clear. */ | ||
235 | p->thread.proc_status = 0; | ||
236 | #endif | ||
237 | |||
238 | |||
239 | |||
240 | /* | ||
241 | * Start the new thread with the current architecture state | ||
242 | * (user interrupt masks, etc.). | ||
243 | */ | ||
244 | save_arch_state(&p->thread); | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * Return "current" if it looks plausible, or else a pointer to a dummy. | ||
251 | * This can be helpful if we are just trying to emit a clean panic. | ||
252 | */ | ||
253 | struct task_struct *validate_current(void) | ||
254 | { | ||
255 | static struct task_struct corrupt = { .comm = "<corrupt>" }; | ||
256 | struct task_struct *tsk = current; | ||
257 | if (unlikely((unsigned long)tsk < PAGE_OFFSET || | ||
258 | (void *)tsk > high_memory || | ||
259 | ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) { | ||
260 | printk("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer); | ||
261 | tsk = &corrupt; | ||
262 | } | ||
263 | return tsk; | ||
264 | } | ||
265 | |||
266 | /* Take and return the pointer to the previous task, for schedule_tail(). */ | ||
267 | struct task_struct *sim_notify_fork(struct task_struct *prev) | ||
268 | { | ||
269 | struct task_struct *tsk = current; | ||
270 | __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK_PARENT | | ||
271 | (tsk->thread.creator_pid << _SIM_CONTROL_OPERATOR_BITS)); | ||
272 | __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_FORK | | ||
273 | (tsk->pid << _SIM_CONTROL_OPERATOR_BITS)); | ||
274 | return prev; | ||
275 | } | ||
276 | |||
277 | int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) | ||
278 | { | ||
279 | struct pt_regs *ptregs = task_pt_regs(tsk); | ||
280 | elf_core_copy_regs(regs, ptregs); | ||
281 | return 1; | ||
282 | } | ||
283 | |||
284 | #if CHIP_HAS_TILE_DMA() | ||
285 | |||
286 | /* Allow user processes to access the DMA SPRs */ | ||
287 | void grant_dma_mpls(void) | ||
288 | { | ||
289 | __insn_mtspr(SPR_MPL_DMA_CPL_SET_0, 1); | ||
290 | __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_0, 1); | ||
291 | } | ||
292 | |||
293 | /* Forbid user processes from accessing the DMA SPRs */ | ||
294 | void restrict_dma_mpls(void) | ||
295 | { | ||
296 | __insn_mtspr(SPR_MPL_DMA_CPL_SET_1, 1); | ||
297 | __insn_mtspr(SPR_MPL_DMA_NOTIFY_SET_1, 1); | ||
298 | } | ||
299 | |||
300 | /* Pause the DMA engine, then save off its state registers. */ | ||
301 | static void save_tile_dma_state(struct tile_dma_state *dma) | ||
302 | { | ||
303 | unsigned long state = __insn_mfspr(SPR_DMA_USER_STATUS); | ||
304 | unsigned long post_suspend_state; | ||
305 | |||
306 | /* If we're running, suspend the engine. */ | ||
307 | if ((state & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) | ||
308 | __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK); | ||
309 | |||
310 | /* | ||
311 | * Wait for the engine to idle, then save regs. Note that we | ||
312 | * want to record the "running" bit from before suspension, | ||
313 | * and the "done" bit from after, so that we can properly | ||
314 | * distinguish a case where the user suspended the engine from | ||
315 | * the case where the kernel suspended as part of the context | ||
316 | * swap. | ||
317 | */ | ||
318 | do { | ||
319 | post_suspend_state = __insn_mfspr(SPR_DMA_USER_STATUS); | ||
320 | } while (post_suspend_state & SPR_DMA_STATUS__BUSY_MASK); | ||
321 | |||
322 | dma->src = __insn_mfspr(SPR_DMA_SRC_ADDR); | ||
323 | dma->src_chunk = __insn_mfspr(SPR_DMA_SRC_CHUNK_ADDR); | ||
324 | dma->dest = __insn_mfspr(SPR_DMA_DST_ADDR); | ||
325 | dma->dest_chunk = __insn_mfspr(SPR_DMA_DST_CHUNK_ADDR); | ||
326 | dma->strides = __insn_mfspr(SPR_DMA_STRIDE); | ||
327 | dma->chunk_size = __insn_mfspr(SPR_DMA_CHUNK_SIZE); | ||
328 | dma->byte = __insn_mfspr(SPR_DMA_BYTE); | ||
329 | dma->status = (state & SPR_DMA_STATUS__RUNNING_MASK) | | ||
330 | (post_suspend_state & SPR_DMA_STATUS__DONE_MASK); | ||
331 | } | ||
332 | |||
333 | /* Restart a DMA that was running before we were context-switched out. */ | ||
334 | static void restore_tile_dma_state(struct thread_struct *t) | ||
335 | { | ||
336 | const struct tile_dma_state *dma = &t->tile_dma_state; | ||
337 | |||
338 | /* | ||
339 | * The only way to restore the done bit is to run a zero | ||
340 | * length transaction. | ||
341 | */ | ||
342 | if ((dma->status & SPR_DMA_STATUS__DONE_MASK) && | ||
343 | !(__insn_mfspr(SPR_DMA_USER_STATUS) & SPR_DMA_STATUS__DONE_MASK)) { | ||
344 | __insn_mtspr(SPR_DMA_BYTE, 0); | ||
345 | __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK); | ||
346 | while (__insn_mfspr(SPR_DMA_USER_STATUS) & | ||
347 | SPR_DMA_STATUS__BUSY_MASK) | ||
348 | ; | ||
349 | } | ||
350 | |||
351 | __insn_mtspr(SPR_DMA_SRC_ADDR, dma->src); | ||
352 | __insn_mtspr(SPR_DMA_SRC_CHUNK_ADDR, dma->src_chunk); | ||
353 | __insn_mtspr(SPR_DMA_DST_ADDR, dma->dest); | ||
354 | __insn_mtspr(SPR_DMA_DST_CHUNK_ADDR, dma->dest_chunk); | ||
355 | __insn_mtspr(SPR_DMA_STRIDE, dma->strides); | ||
356 | __insn_mtspr(SPR_DMA_CHUNK_SIZE, dma->chunk_size); | ||
357 | __insn_mtspr(SPR_DMA_BYTE, dma->byte); | ||
358 | |||
359 | /* | ||
360 | * Restart the engine if we were running and not done. | ||
361 | * Clear a pending async DMA fault that we were waiting on return | ||
362 | * to user space to execute, since we expect the DMA engine | ||
363 | * to regenerate those faults for us now. Note that we don't | ||
364 | * try to clear the TIF_ASYNC_TLB flag, since it's relatively | ||
365 | * harmless if set, and it covers both DMA and the SN processor. | ||
366 | */ | ||
367 | if ((dma->status & DMA_STATUS_MASK) == SPR_DMA_STATUS__RUNNING_MASK) { | ||
368 | t->dma_async_tlb.fault_num = 0; | ||
369 | __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK); | ||
370 | } | ||
371 | } | ||
372 | |||
373 | #endif | ||
374 | |||
375 | static void save_arch_state(struct thread_struct *t) | ||
376 | { | ||
377 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
378 | t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0_0) | | ||
379 | ((u64)__insn_mfspr(SPR_INTERRUPT_MASK_0_1) << 32); | ||
380 | #else | ||
381 | t->interrupt_mask = __insn_mfspr(SPR_INTERRUPT_MASK_0); | ||
382 | #endif | ||
383 | t->ex_context[0] = __insn_mfspr(SPR_EX_CONTEXT_0_0); | ||
384 | t->ex_context[1] = __insn_mfspr(SPR_EX_CONTEXT_0_1); | ||
385 | t->system_save[0] = __insn_mfspr(SPR_SYSTEM_SAVE_0_0); | ||
386 | t->system_save[1] = __insn_mfspr(SPR_SYSTEM_SAVE_0_1); | ||
387 | t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2); | ||
388 | t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3); | ||
389 | t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS); | ||
390 | #if CHIP_HAS_PROC_STATUS_SPR() | ||
391 | t->proc_status = __insn_mfspr(SPR_PROC_STATUS); | ||
392 | #endif | ||
393 | } | ||
394 | |||
395 | static void restore_arch_state(const struct thread_struct *t) | ||
396 | { | ||
397 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
398 | __insn_mtspr(SPR_INTERRUPT_MASK_0_0, (u32) t->interrupt_mask); | ||
399 | __insn_mtspr(SPR_INTERRUPT_MASK_0_1, t->interrupt_mask >> 32); | ||
400 | #else | ||
401 | __insn_mtspr(SPR_INTERRUPT_MASK_0, t->interrupt_mask); | ||
402 | #endif | ||
403 | __insn_mtspr(SPR_EX_CONTEXT_0_0, t->ex_context[0]); | ||
404 | __insn_mtspr(SPR_EX_CONTEXT_0_1, t->ex_context[1]); | ||
405 | __insn_mtspr(SPR_SYSTEM_SAVE_0_0, t->system_save[0]); | ||
406 | __insn_mtspr(SPR_SYSTEM_SAVE_0_1, t->system_save[1]); | ||
407 | __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]); | ||
408 | __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]); | ||
409 | __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0); | ||
410 | #if CHIP_HAS_PROC_STATUS_SPR() | ||
411 | __insn_mtspr(SPR_PROC_STATUS, t->proc_status); | ||
412 | #endif | ||
413 | #if CHIP_HAS_TILE_RTF_HWM() | ||
414 | /* | ||
415 | * Clear this whenever we switch back to a process in case | ||
416 | * the previous process was monkeying with it. Even if enabled | ||
417 | * in CBOX_MSR1 via TILE_RTF_HWM_MIN, it's still just a | ||
418 | * performance hint, so isn't worth a full save/restore. | ||
419 | */ | ||
420 | __insn_mtspr(SPR_TILE_RTF_HWM, 0); | ||
421 | #endif | ||
422 | } | ||
423 | |||
424 | |||
425 | void _prepare_arch_switch(struct task_struct *next) | ||
426 | { | ||
427 | #if CHIP_HAS_SN_PROC() | ||
428 | int snctl; | ||
429 | #endif | ||
430 | #if CHIP_HAS_TILE_DMA() | ||
431 | struct tile_dma_state *dma = ¤t->thread.tile_dma_state; | ||
432 | if (dma->enabled) | ||
433 | save_tile_dma_state(dma); | ||
434 | #endif | ||
435 | #if CHIP_HAS_SN_PROC() | ||
436 | /* | ||
437 | * Suspend the static network processor if it was running. | ||
438 | * We do not suspend the fabric itself, just like we don't | ||
439 | * try to suspend the UDN. | ||
440 | */ | ||
441 | snctl = __insn_mfspr(SPR_SNCTL); | ||
442 | current->thread.sn_proc_running = | ||
443 | (snctl & SPR_SNCTL__FRZPROC_MASK) == 0; | ||
444 | if (current->thread.sn_proc_running) | ||
445 | __insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK); | ||
446 | #endif | ||
447 | } | ||
448 | |||
449 | |||
450 | extern struct task_struct *__switch_to(struct task_struct *prev, | ||
451 | struct task_struct *next, | ||
452 | unsigned long new_system_save_1_0); | ||
453 | |||
454 | struct task_struct *__sched _switch_to(struct task_struct *prev, | ||
455 | struct task_struct *next) | ||
456 | { | ||
457 | /* DMA state is already saved; save off other arch state. */ | ||
458 | save_arch_state(&prev->thread); | ||
459 | |||
460 | #if CHIP_HAS_TILE_DMA() | ||
461 | /* | ||
462 | * Restore DMA in new task if desired. | ||
463 | * Note that it is only safe to restart here since interrupts | ||
464 | * are disabled, so we can't take any DMATLB miss or access | ||
465 | * interrupts before we have finished switching stacks. | ||
466 | */ | ||
467 | if (next->thread.tile_dma_state.enabled) { | ||
468 | restore_tile_dma_state(&next->thread); | ||
469 | grant_dma_mpls(); | ||
470 | } else { | ||
471 | restrict_dma_mpls(); | ||
472 | } | ||
473 | #endif | ||
474 | |||
475 | /* Restore other arch state. */ | ||
476 | restore_arch_state(&next->thread); | ||
477 | |||
478 | #if CHIP_HAS_SN_PROC() | ||
479 | /* | ||
480 | * Restart static network processor in the new process | ||
481 | * if it was running before. | ||
482 | */ | ||
483 | if (next->thread.sn_proc_running) { | ||
484 | int snctl = __insn_mfspr(SPR_SNCTL); | ||
485 | __insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK); | ||
486 | } | ||
487 | #endif | ||
488 | |||
489 | |||
490 | /* | ||
491 | * Switch kernel SP, PC, and callee-saved registers. | ||
492 | * In the context of the new task, return the old task pointer | ||
493 | * (i.e. the task that actually called __switch_to). | ||
494 | * Pass the value to use for SYSTEM_SAVE_1_0 when we reset our sp. | ||
495 | */ | ||
496 | return __switch_to(prev, next, next_current_ksp0(next)); | ||
497 | } | ||
498 | |||
499 | int _sys_fork(struct pt_regs *regs) | ||
500 | { | ||
501 | return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL); | ||
502 | } | ||
503 | |||
504 | int _sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
505 | int __user *parent_tidptr, int __user *child_tidptr, | ||
506 | struct pt_regs *regs) | ||
507 | { | ||
508 | if (!newsp) | ||
509 | newsp = regs->sp; | ||
510 | return do_fork(clone_flags, newsp, regs, 0, | ||
511 | parent_tidptr, child_tidptr); | ||
512 | } | ||
513 | |||
514 | int _sys_vfork(struct pt_regs *regs) | ||
515 | { | ||
516 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, | ||
517 | regs, 0, NULL, NULL); | ||
518 | } | ||
519 | |||
520 | /* | ||
521 | * sys_execve() executes a new program. | ||
522 | */ | ||
523 | int _sys_execve(char __user *path, char __user *__user *argv, | ||
524 | char __user *__user *envp, struct pt_regs *regs) | ||
525 | { | ||
526 | int error; | ||
527 | char *filename; | ||
528 | |||
529 | filename = getname(path); | ||
530 | error = PTR_ERR(filename); | ||
531 | if (IS_ERR(filename)) | ||
532 | goto out; | ||
533 | error = do_execve(filename, argv, envp, regs); | ||
534 | putname(filename); | ||
535 | out: | ||
536 | return error; | ||
537 | } | ||
538 | |||
539 | #ifdef CONFIG_COMPAT | ||
540 | int _compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | ||
541 | compat_uptr_t __user *envp, struct pt_regs *regs) | ||
542 | { | ||
543 | int error; | ||
544 | char *filename; | ||
545 | |||
546 | filename = getname(path); | ||
547 | error = PTR_ERR(filename); | ||
548 | if (IS_ERR(filename)) | ||
549 | goto out; | ||
550 | error = compat_do_execve(filename, argv, envp, regs); | ||
551 | putname(filename); | ||
552 | out: | ||
553 | return error; | ||
554 | } | ||
555 | #endif | ||
556 | |||
557 | unsigned long get_wchan(struct task_struct *p) | ||
558 | { | ||
559 | struct KBacktraceIterator kbt; | ||
560 | |||
561 | if (!p || p == current || p->state == TASK_RUNNING) | ||
562 | return 0; | ||
563 | |||
564 | for (KBacktraceIterator_init(&kbt, p, NULL); | ||
565 | !KBacktraceIterator_end(&kbt); | ||
566 | KBacktraceIterator_next(&kbt)) { | ||
567 | if (!in_sched_functions(kbt.it.pc)) | ||
568 | return kbt.it.pc; | ||
569 | } | ||
570 | |||
571 | return 0; | ||
572 | } | ||
573 | |||
574 | /* | ||
575 | * We pass in lr as zero (cleared in kernel_thread), and the caller's | ||
576 | * part of the backtrace ABI on the stack is also zeroed (in copy_thread), | ||
577 | * so that backtraces will stop with this function. | ||
578 | * Note that we don't use r0, since copy_thread() clears it. | ||
579 | */ | ||
580 | static void start_kernel_thread(int dummy, int (*fn)(int), int arg) | ||
581 | { | ||
582 | do_exit(fn(arg)); | ||
583 | } | ||
584 | |||
585 | /* | ||
586 | * Create a kernel thread | ||
587 | */ | ||
588 | int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) | ||
589 | { | ||
590 | struct pt_regs regs; | ||
591 | |||
592 | memset(®s, 0, sizeof(regs)); | ||
593 | regs.ex1 = PL_ICS_EX1(KERNEL_PL, 0); /* run at kernel PL, no ICS */ | ||
594 | regs.pc = (long) start_kernel_thread; | ||
595 | regs.flags = PT_FLAGS_CALLER_SAVES; /* need to restore r1 and r2 */ | ||
596 | regs.regs[1] = (long) fn; /* function pointer */ | ||
597 | regs.regs[2] = (long) arg; /* parameter register */ | ||
598 | |||
599 | /* OK, create the new process. */ | ||
600 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, | ||
601 | 0, NULL, NULL); | ||
602 | } | ||
603 | EXPORT_SYMBOL(kernel_thread); | ||
604 | |||
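As an illustrative sketch (not part of this patch) of how kernel-side code of this era would use the helper above: the thread function runs at kernel PL via start_kernel_thread(), and its return value becomes the code passed to do_exit(). The names below are hypothetical, and includes/error handling are elided since this only runs in kernel context.

/* Hypothetical caller, kernel context only. */
static int my_worker(void *arg)
{
	long count = (long)arg;
	/* ... do some work ... */
	return count > 0 ? 0 : -EINVAL;	/* becomes the do_exit() code */
}

static void launch_worker(void)
{
	int pid = kernel_thread(my_worker, (void *)16, CLONE_FS | CLONE_FILES);
	if (pid < 0)
		printk("kernel_thread failed: %d\n", pid);
}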
605 | /* Flush thread state. */ | ||
606 | void flush_thread(void) | ||
607 | { | ||
608 | /* Nothing */ | ||
609 | } | ||
610 | |||
611 | /* | ||
612 | * Free current thread data structures, etc. | ||
613 | */ | ||
614 | void exit_thread(void) | ||
615 | { | ||
616 | /* Nothing */ | ||
617 | } | ||
618 | |||
619 | #ifdef __tilegx__ | ||
620 | # define LINECOUNT 3 | ||
621 | # define EXTRA_NL "\n" | ||
622 | #else | ||
623 | # define LINECOUNT 4 | ||
624 | # define EXTRA_NL "" | ||
625 | #endif | ||
626 | |||
627 | void show_regs(struct pt_regs *regs) | ||
628 | { | ||
629 | struct task_struct *tsk = validate_current(); | ||
630 | int i, linebreak; | ||
631 | printk("\n"); | ||
632 | printk(" Pid: %d, comm: %20s, CPU: %d\n", | ||
633 | tsk->pid, tsk->comm, smp_processor_id()); | ||
634 | for (i = linebreak = 0; i < 53; ++i) { | ||
635 | printk(" r%-2d: "REGFMT, i, regs->regs[i]); | ||
636 | if (++linebreak == LINECOUNT) { | ||
637 | linebreak = 0; | ||
638 | printk("\n"); | ||
639 | } | ||
640 | } | ||
641 | printk(" tp : "REGFMT EXTRA_NL " sp : "REGFMT" lr : "REGFMT"\n", | ||
642 | regs->tp, regs->sp, regs->lr); | ||
643 | printk(" pc : "REGFMT" ex1: %ld faultnum: %ld\n", | ||
644 | regs->pc, regs->ex1, regs->faultnum); | ||
645 | |||
646 | dump_stack_regs(regs); | ||
647 | } | ||
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c new file mode 100644 index 000000000000..468054928e7d --- /dev/null +++ b/arch/tile/kernel/ptrace.c | |||
@@ -0,0 +1,203 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Copied from i386: Ross Biro 1/23/92 | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <linux/kprobes.h> | ||
20 | #include <linux/compat.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | |||
23 | void user_enable_single_step(struct task_struct *child) | ||
24 | { | ||
25 | set_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
26 | } | ||
27 | |||
28 | void user_disable_single_step(struct task_struct *child) | ||
29 | { | ||
30 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
31 | } | ||
32 | |||
33 | /* | ||
34 | * This routine will set a register value in the process's saved pt_regs. | ||
35 | */ | ||
36 | static void putreg(struct task_struct *task, | ||
37 | unsigned long addr, unsigned long value) | ||
38 | { | ||
39 | unsigned int regno = addr / sizeof(unsigned long); | ||
40 | struct pt_regs *childregs = task_pt_regs(task); | ||
41 | childregs->regs[regno] = value; | ||
42 | childregs->flags |= PT_FLAGS_RESTORE_REGS; | ||
43 | } | ||
44 | |||
45 | static unsigned long getreg(struct task_struct *task, unsigned long addr) | ||
46 | { | ||
47 | unsigned int regno = addr / sizeof(unsigned long); | ||
48 | struct pt_regs *childregs = task_pt_regs(task); | ||
49 | return childregs->regs[regno]; | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * Called by kernel/ptrace.c when detaching. | ||
54 | */ | ||
55 | void ptrace_disable(struct task_struct *child) | ||
56 | { | ||
57 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
58 | |||
59 | /* | ||
60 | * These two are currently unused, but will be set by arch_ptrace() | ||
61 | * and used in the syscall assembly when we do support them. | ||
62 | */ | ||
63 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
64 | } | ||
65 | |||
66 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) | ||
67 | { | ||
68 | unsigned long __user *datap; | ||
69 | unsigned long tmp; | ||
70 | int i; | ||
71 | long ret = -EIO; | ||
72 | |||
73 | #ifdef CONFIG_COMPAT | ||
74 | if (task_thread_info(current)->status & TS_COMPAT) | ||
75 | data = (u32)data; | ||
76 | if (task_thread_info(child)->status & TS_COMPAT) | ||
77 | addr = (u32)addr; | ||
78 | #endif | ||
79 | datap = (unsigned long __user *)data; | ||
80 | |||
81 | switch (request) { | ||
82 | |||
83 | case PTRACE_PEEKUSR: /* Read register from pt_regs. */ | ||
84 | if (addr & (sizeof(data)-1)) | ||
85 | break; | ||
86 | if (addr < 0 || addr >= PTREGS_SIZE) | ||
87 | break; | ||
88 | tmp = getreg(child, addr); /* Read register */ | ||
89 | ret = put_user(tmp, datap); | ||
90 | break; | ||
91 | |||
92 | case PTRACE_POKEUSR: /* Write register in pt_regs. */ | ||
93 | if (addr & (sizeof(data)-1)) | ||
94 | break; | ||
95 | if (addr < 0 || addr >= PTREGS_SIZE) | ||
96 | break; | ||
97 | putreg(child, addr, data); /* Write register */ | ||
98 | break; | ||
99 | |||
100 | case PTRACE_GETREGS: /* Get all registers from the child. */ | ||
101 | if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE)) | ||
102 | break; | ||
103 | for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) { | ||
104 | ret = __put_user(getreg(child, i), datap); | ||
105 | if (ret != 0) | ||
106 | break; | ||
107 | datap++; | ||
108 | } | ||
109 | break; | ||
110 | |||
111 | case PTRACE_SETREGS: /* Set all registers in the child. */ | ||
112 | if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE)) | ||
113 | break; | ||
114 | for (i = 0; i < PTREGS_SIZE; i += sizeof(long)) { | ||
115 | ret = __get_user(tmp, datap); | ||
116 | if (ret != 0) | ||
117 | break; | ||
118 | putreg(child, i, tmp); | ||
119 | datap++; | ||
120 | } | ||
121 | break; | ||
122 | |||
123 | case PTRACE_GETFPREGS: /* Get the child FPU state. */ | ||
124 | case PTRACE_SETFPREGS: /* Set the child FPU state. */ | ||
125 | break; | ||
126 | |||
127 | case PTRACE_SETOPTIONS: | ||
128 | /* Support TILE-specific ptrace options. */ | ||
129 | child->ptrace &= ~PT_TRACE_MASK_TILE; | ||
130 | tmp = data & PTRACE_O_MASK_TILE; | ||
131 | data &= ~PTRACE_O_MASK_TILE; | ||
132 | ret = ptrace_request(child, request, addr, data); | ||
133 | if (tmp & PTRACE_O_TRACEMIGRATE) | ||
134 | child->ptrace |= PT_TRACE_MIGRATE; | ||
135 | break; | ||
136 | |||
137 | default: | ||
138 | #ifdef CONFIG_COMPAT | ||
139 | if (task_thread_info(current)->status & TS_COMPAT) { | ||
140 | ret = compat_ptrace_request(child, request, | ||
141 | addr, data); | ||
142 | break; | ||
143 | } | ||
144 | #endif | ||
145 | ret = ptrace_request(child, request, addr, data); | ||
146 | break; | ||
147 | } | ||
148 | |||
149 | return ret; | ||
150 | } | ||
151 | |||
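The PTRACE_PEEKUSR/PTRACE_POKEUSR cases above address registers by byte offset into pt_regs (regno * sizeof(long)). A hedged userspace sketch of reading a register that way from a tracee which is already ptrace-stopped (pid and register number are placeholders; glibc spells the request PTRACE_PEEKUSER):

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Read one pt_regs word from an already-stopped tracee. */
static long peek_reg(pid_t pid, int regno)
{
	long val;

	errno = 0;
	val = ptrace(PTRACE_PEEKUSER, pid,
		     (void *)(regno * sizeof(long)), NULL);
	if (val == -1 && errno != 0)
		perror("PTRACE_PEEKUSER");
	return val;
}

int main(void)
{
	pid_t pid = 12345;	/* placeholder: pid of a stopped tracee */
	printf("r5 = %#lx\n", peek_reg(pid, 5));
	return 0;
}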
152 | #ifdef CONFIG_COMPAT | ||
153 | /* Not used; we handle compat issues in arch_ptrace() directly. */ | ||
154 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | ||
155 | compat_ulong_t addr, compat_ulong_t data) | ||
156 | { | ||
157 | BUG(); | ||
158 | } | ||
159 | #endif | ||
160 | |||
161 | void do_syscall_trace(void) | ||
162 | { | ||
163 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | ||
164 | return; | ||
165 | |||
166 | if (!(current->ptrace & PT_PTRACED)) | ||
167 | return; | ||
168 | |||
169 | /* | ||
170 | * The 0x80 provides a way for the tracing parent to distinguish | ||
171 | * between a syscall stop and SIGTRAP delivery. | ||
172 | */ | ||
173 | ptrace_notify(SIGTRAP|((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); | ||
174 | |||
175 | /* | ||
176 | * This isn't the same as continuing with a signal, but it will do | ||
177 | * for normal use. strace only continues with a signal if the | ||
178 | * stopping signal is not SIGTRAP. -brl | ||
179 | */ | ||
180 | if (current->exit_code) { | ||
181 | send_sig(current->exit_code, current, 1); | ||
182 | current->exit_code = 0; | ||
183 | } | ||
184 | } | ||
185 | |||
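The SIGTRAP|0x80 convention used by do_syscall_trace() above is what a tracer opts into with PTRACE_O_TRACESYSGOOD. A minimal sketch (error handling elided, tracee assumed already attached and stopped) of telling syscall stops apart from ordinary SIGTRAPs:

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void trace_syscalls(pid_t pid)
{
	int status;

	ptrace(PTRACE_SETOPTIONS, pid, NULL, (void *)PTRACE_O_TRACESYSGOOD);
	for (;;) {
		ptrace(PTRACE_SYSCALL, pid, NULL, NULL);	/* run to next syscall stop */
		waitpid(pid, &status, 0);
		if (WIFEXITED(status))
			break;
		if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
			printf("syscall entry/exit stop\n");
		else if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
			printf("plain SIGTRAP (e.g. breakpoint)\n");
	}
}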
186 | void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) | ||
187 | { | ||
188 | struct siginfo info; | ||
189 | |||
190 | memset(&info, 0, sizeof(info)); | ||
191 | info.si_signo = SIGTRAP; | ||
192 | info.si_code = TRAP_BRKPT; | ||
193 | info.si_addr = (void __user *) regs->pc; | ||
194 | |||
195 | /* Send us the fakey SIGTRAP */ | ||
196 | force_sig_info(SIGTRAP, &info, tsk); | ||
197 | } | ||
198 | |||
199 | /* Handle synthetic interrupt delivered only by the simulator. */ | ||
200 | void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num) | ||
201 | { | ||
202 | send_sigtrap(current, regs, fault_num); | ||
203 | } | ||
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c new file mode 100644 index 000000000000..a4523923605e --- /dev/null +++ b/arch/tile/kernel/reboot.c | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/stddef.h> | ||
16 | #include <linux/reboot.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <asm/page.h> | ||
19 | #include <asm/setup.h> | ||
20 | #include <hv/hypervisor.h> | ||
21 | |||
22 | #ifndef CONFIG_SMP | ||
23 | #define smp_send_stop() | ||
24 | #endif | ||
25 | |||
26 | void machine_halt(void) | ||
27 | { | ||
28 | warn_early_printk(); | ||
29 | raw_local_irq_disable_all(); | ||
30 | smp_send_stop(); | ||
31 | hv_halt(); | ||
32 | } | ||
33 | |||
34 | void machine_power_off(void) | ||
35 | { | ||
36 | warn_early_printk(); | ||
37 | raw_local_irq_disable_all(); | ||
38 | smp_send_stop(); | ||
39 | hv_power_off(); | ||
40 | } | ||
41 | |||
42 | void machine_restart(char *cmd) | ||
43 | { | ||
44 | raw_local_irq_disable_all(); | ||
45 | smp_send_stop(); | ||
46 | hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd); | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * Power off function, if any | ||
51 | */ | ||
52 | void (*pm_power_off)(void) = machine_power_off; | ||
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S new file mode 100644 index 000000000000..e88d6e122783 --- /dev/null +++ b/arch/tile/kernel/regs_32.S | |||
@@ -0,0 +1,145 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <asm/system.h> | ||
17 | #include <asm/ptrace.h> | ||
18 | #include <asm/asm-offsets.h> | ||
19 | #include <arch/spr_def.h> | ||
20 | #include <asm/processor.h> | ||
21 | |||
22 | /* | ||
23 | * See <asm/system.h>; called with prev and next task_struct pointers. | ||
24 | * "prev" is returned in r0 for _switch_to and also for ret_from_fork. | ||
25 | * | ||
26 | * We want to save pc/sp in "prev", and get the new pc/sp from "next". | ||
27 | * We also need to save all the callee-saved registers on the stack. | ||
28 | * | ||
29 | * Intel enables/disables access to the hardware cycle counter in | ||
30 | * seccomp (secure computing) environments if necessary, based on | ||
31 | * has_secure_computing(). We might want to do this at some point, | ||
32 | * though it would require virtualizing the other SPRs under WORLD_ACCESS. | ||
33 | * | ||
34 | * Since we're saving to the stack, we omit sp from this list. | ||
35 | * And to parallel other architectures, we save lr separately, | ||
36 | * in the thread_struct itself (as the "pc" field). | ||
37 | * | ||
38 | * This code also needs to be kept in sync with copy_thread() in process.c. | ||
39 | */ | ||
40 | |||
41 | #if CALLEE_SAVED_REGS_COUNT != 24 | ||
42 | # error Mismatch between <asm/system.h> and kernel/entry.S | ||
43 | #endif | ||
44 | #define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4) | ||
45 | |||
46 | #define SAVE_REG(r) { sw r12, r; addi r12, r12, 4 } | ||
47 | #define LOAD_REG(r) { lw r, r12; addi r12, r12, 4 } | ||
48 | #define FOR_EACH_CALLEE_SAVED_REG(f) \ | ||
49 | f(r30); f(r31); \ | ||
50 | f(r32); f(r33); f(r34); f(r35); f(r36); f(r37); f(r38); f(r39); \ | ||
51 | f(r40); f(r41); f(r42); f(r43); f(r44); f(r45); f(r46); f(r47); \ | ||
52 | f(r48); f(r49); f(r50); f(r51); f(r52); | ||
53 | |||
54 | STD_ENTRY_SECTION(__switch_to, .sched.text) | ||
55 | { | ||
56 | move r10, sp | ||
57 | sw sp, lr | ||
58 | addi sp, sp, -FRAME_SIZE | ||
59 | } | ||
60 | { | ||
61 | addi r11, sp, 4 | ||
62 | addi r12, sp, 8 | ||
63 | } | ||
64 | { | ||
65 | sw r11, r10 | ||
66 | addli r4, r1, TASK_STRUCT_THREAD_KSP_OFFSET | ||
67 | } | ||
68 | { | ||
69 | lw r13, r4 /* Load new sp to a temp register early. */ | ||
70 | addli r3, r0, TASK_STRUCT_THREAD_KSP_OFFSET | ||
71 | } | ||
72 | FOR_EACH_CALLEE_SAVED_REG(SAVE_REG) | ||
73 | { | ||
74 | sw r3, sp | ||
75 | addli r3, r0, TASK_STRUCT_THREAD_PC_OFFSET | ||
76 | } | ||
77 | { | ||
78 | sw r3, lr | ||
79 | addli r4, r1, TASK_STRUCT_THREAD_PC_OFFSET | ||
80 | } | ||
81 | { | ||
82 | lw lr, r4 | ||
83 | addi r12, r13, 8 | ||
84 | } | ||
85 | { | ||
86 | /* Update sp and ksp0 simultaneously to avoid backtracer warnings. */ | ||
87 | move sp, r13 | ||
88 | mtspr SYSTEM_SAVE_1_0, r2 | ||
89 | } | ||
90 | FOR_EACH_CALLEE_SAVED_REG(LOAD_REG) | ||
91 | .L__switch_to_pc: | ||
92 | { | ||
93 | addi sp, sp, FRAME_SIZE | ||
94 | jrp lr /* r0 is still valid here, so return it */ | ||
95 | } | ||
96 | STD_ENDPROC(__switch_to) | ||
97 | |||
98 | /* Return a suitable address for the backtracer for suspended threads */ | ||
99 | STD_ENTRY_SECTION(get_switch_to_pc, .sched.text) | ||
100 | lnk r0 | ||
101 | { | ||
102 | addli r0, r0, .L__switch_to_pc - . | ||
103 | jrp lr | ||
104 | } | ||
105 | STD_ENDPROC(get_switch_to_pc) | ||
106 | |||
107 | STD_ENTRY(get_pt_regs) | ||
108 | .irp reg, r0, r1, r2, r3, r4, r5, r6, r7, \ | ||
109 | r8, r9, r10, r11, r12, r13, r14, r15, \ | ||
110 | r16, r17, r18, r19, r20, r21, r22, r23, \ | ||
111 | r24, r25, r26, r27, r28, r29, r30, r31, \ | ||
112 | r32, r33, r34, r35, r36, r37, r38, r39, \ | ||
113 | r40, r41, r42, r43, r44, r45, r46, r47, \ | ||
114 | r48, r49, r50, r51, r52, tp, sp | ||
115 | { | ||
116 | sw r0, \reg | ||
117 | addi r0, r0, 4 | ||
118 | } | ||
119 | .endr | ||
120 | { | ||
121 | sw r0, lr | ||
122 | addi r0, r0, PTREGS_OFFSET_PC - PTREGS_OFFSET_LR | ||
123 | } | ||
124 | lnk r1 | ||
125 | { | ||
126 | sw r0, r1 | ||
127 | addi r0, r0, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC | ||
128 | } | ||
129 | mfspr r1, INTERRUPT_CRITICAL_SECTION | ||
130 | shli r1, r1, SPR_EX_CONTEXT_1_1__ICS_SHIFT | ||
131 | ori r1, r1, KERNEL_PL | ||
132 | { | ||
133 | sw r0, r1 | ||
134 | addi r0, r0, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1 | ||
135 | } | ||
136 | { | ||
137 | sw r0, zero /* clear faultnum */ | ||
138 | addi r0, r0, PTREGS_OFFSET_ORIG_R0 - PTREGS_OFFSET_FAULTNUM | ||
139 | } | ||
140 | { | ||
141 | sw r0, zero /* clear orig_r0 */ | ||
142 | addli r0, r0, -PTREGS_OFFSET_ORIG_R0 /* restore r0 to base */ | ||
143 | } | ||
144 | jrp lr | ||
145 | STD_ENDPROC(get_pt_regs) | ||
diff --git a/arch/tile/kernel/relocate_kernel.S b/arch/tile/kernel/relocate_kernel.S new file mode 100644 index 000000000000..010b418515f8 --- /dev/null +++ b/arch/tile/kernel/relocate_kernel.S | |||
@@ -0,0 +1,280 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Copy the new kernel into place and then call hv_reexec. | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #include <linux/linkage.h> | ||
19 | #include <arch/chip.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | #define ___hvb MEM_SV_INTRPT + HV_GLUE_START_CPA | ||
24 | |||
25 | #define ___hv_dispatch(f) (___hvb + (HV_DISPATCH_ENTRY_SIZE * f)) | ||
26 | |||
27 | #define ___hv_console_putc ___hv_dispatch(HV_DISPATCH_CONSOLE_PUTC) | ||
28 | #define ___hv_halt ___hv_dispatch(HV_DISPATCH_HALT) | ||
29 | #define ___hv_reexec ___hv_dispatch(HV_DISPATCH_REEXEC) | ||
30 | #define ___hv_flush_remote ___hv_dispatch(HV_DISPATCH_FLUSH_REMOTE) | ||
31 | |||
32 | #undef RELOCATE_NEW_KERNEL_VERBOSE | ||
33 | |||
34 | STD_ENTRY(relocate_new_kernel) | ||
35 | |||
36 | move r30, r0 /* page list */ | ||
37 | move r31, r1 /* address of page we are on */ | ||
38 | move r32, r2 /* start address of new kernel */ | ||
39 | |||
40 | shri r1, r1, PAGE_SHIFT | ||
41 | addi r1, r1, 1 | ||
42 | shli sp, r1, PAGE_SHIFT | ||
43 | addi sp, sp, -8 | ||
44 | /* we now have a stack (whether we need one or not) */ | ||
45 | |||
46 | moveli r40, lo16(___hv_console_putc) | ||
47 | auli r40, r40, ha16(___hv_console_putc) | ||
48 | |||
49 | #ifdef RELOCATE_NEW_KERNEL_VERBOSE | ||
50 | moveli r0, 'r' | ||
51 | jalr r40 | ||
52 | |||
53 | moveli r0, '_' | ||
54 | jalr r40 | ||
55 | |||
56 | moveli r0, 'n' | ||
57 | jalr r40 | ||
58 | |||
59 | moveli r0, '_' | ||
60 | jalr r40 | ||
61 | |||
62 | moveli r0, 'k' | ||
63 | jalr r40 | ||
64 | |||
65 | moveli r0, '\n' | ||
66 | jalr r40 | ||
67 | #endif | ||
68 | |||
69 | /* | ||
70 | * Throughout this code r30 is pointer to the element of page | ||
71 | * list we are working on. | ||
72 | * | ||
73 | * Normally we get to the next element of the page list by | ||
74 | * incrementing r30 by four. The exception is if the element | ||
75 | * on the page list is an IND_INDIRECTION in which case we use | ||
76 | * the element with the low bits masked off as the new value | ||
77 | * of r30. | ||
78 | * | ||
79 | * To get this started, we need the value passed to us (which | ||
80 | * will always be an IND_INDIRECTION) in memory somewhere with | ||
81 | * r30 pointing at it. To do that, we push the value passed | ||
82 | * to us on the stack and make r30 point to it. | ||
83 | */ | ||
84 | |||
85 | sw sp, r30 | ||
86 | move r30, sp | ||
87 | addi sp, sp, -8 | ||
88 | |||
89 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
90 | /* | ||
91 | * On TILEPro, we need to flush all tiles' caches, since we may | ||
92 | * have been doing hash-for-home caching there. Note that we | ||
93 | * must do this _after_ we're completely done modifying any memory | ||
94 | * other than our output buffer (which we know is locally cached). | ||
95 | * We want the caches to be fully clean when we do the reexec, | ||
96 | * because the hypervisor is going to do this flush again at that | ||
97 | * point, and we don't want that second flush to overwrite any memory. | ||
98 | */ | ||
99 | { | ||
100 | move r0, zero /* cache_pa */ | ||
101 | move r1, zero | ||
102 | } | ||
103 | { | ||
104 | auli r2, zero, ha16(HV_FLUSH_EVICT_L2) /* cache_control */ | ||
105 | movei r3, -1 /* cache_cpumask; -1 means all client tiles */ | ||
106 | } | ||
107 | { | ||
108 | move r4, zero /* tlb_va */ | ||
109 | move r5, zero /* tlb_length */ | ||
110 | } | ||
111 | { | ||
112 | move r6, zero /* tlb_pgsize */ | ||
113 | move r7, zero /* tlb_cpumask */ | ||
114 | } | ||
115 | { | ||
116 | move r8, zero /* asids */ | ||
117 | moveli r20, lo16(___hv_flush_remote) | ||
118 | } | ||
119 | { | ||
120 | move r9, zero /* asidcount */ | ||
121 | auli r20, r20, ha16(___hv_flush_remote) | ||
122 | } | ||
123 | |||
124 | jalr r20 | ||
125 | #endif | ||
126 | |||
127 | /* r33 is destination pointer, default to zero */ | ||
128 | |||
129 | moveli r33, 0 | ||
130 | |||
131 | .Lloop: lw r10, r30 | ||
132 | |||
133 | andi r9, r10, 0xf /* low 4 bits tell us what type it is */ | ||
134 | xor r10, r10, r9 /* r10 is now value with low 4 bits stripped */ | ||
135 | |||
136 | seqi r0, r9, 0x1 /* IND_DESTINATION */ | ||
137 | bzt r0, .Ltry2 | ||
138 | |||
139 | move r33, r10 | ||
140 | |||
141 | #ifdef RELOCATE_NEW_KERNEL_VERBOSE | ||
142 | moveli r0, 'd' | ||
143 | jalr r40 | ||
144 | #endif | ||
145 | |||
146 | addi r30, r30, 4 | ||
147 | j .Lloop | ||
148 | |||
149 | .Ltry2: | ||
150 | seqi r0, r9, 0x2 /* IND_INDIRECTION */ | ||
151 | bzt r0, .Ltry4 | ||
152 | |||
153 | move r30, r10 | ||
154 | |||
155 | #ifdef RELOCATE_NEW_KERNEL_VERBOSE | ||
156 | moveli r0, 'i' | ||
157 | jalr r40 | ||
158 | #endif | ||
159 | |||
160 | j .Lloop | ||
161 | |||
162 | .Ltry4: | ||
163 | seqi r0, r9, 0x4 /* IND_DONE */ | ||
164 | bzt r0, .Ltry8 | ||
165 | |||
166 | mf | ||
167 | |||
168 | #ifdef RELOCATE_NEW_KERNEL_VERBOSE | ||
169 | moveli r0, 'D' | ||
170 | jalr r40 | ||
171 | moveli r0, '\n' | ||
172 | jalr r40 | ||
173 | #endif | ||
174 | |||
175 | move r0, r32 | ||
176 | moveli r1, 0 /* arg to hv_reexec is 64 bits */ | ||
177 | |||
178 | moveli r41, lo16(___hv_reexec) | ||
179 | auli r41, r41, ha16(___hv_reexec) | ||
180 | |||
181 | jalr r41 | ||
182 | |||
183 | /* we should not get here */ | ||
184 | |||
185 | moveli r0, '?' | ||
186 | jalr r40 | ||
187 | moveli r0, '\n' | ||
188 | jalr r40 | ||
189 | |||
190 | j .Lhalt | ||
191 | |||
192 | .Ltry8: seqi r0, r9, 0x8 /* IND_SOURCE */ | ||
193 | bz r0, .Lerr /* unknown type */ | ||
194 | |||
195 | /* copy page at r10 to page at r33 */ | ||
196 | |||
197 | move r11, r33 | ||
198 | |||
199 | moveli r0, lo16(PAGE_SIZE) | ||
200 | auli r0, r0, ha16(PAGE_SIZE) | ||
201 | add r33, r33, r0 | ||
202 | |||
203 | /* copy word at r10 to word at r11 until r11 equals r33 */ | ||
204 | |||
205 | /* We know the page size must be a multiple of 16 words, so we can | ||
206 | * unroll the word copy 16 times safely without any edge-case checking. | ||
207 | * | ||
208 | * Issue a flush of the destination every 16 words to avoid | ||
209 | * incoherence when starting the new kernel. (Now this is | ||
210 | * just good paranoia because the hv_reexec call will also | ||
211 | * take care of this.) | ||
212 | */ | ||
213 | |||
214 | 1: | ||
215 | { lw r0, r10; addi r10, r10, 4 } | ||
216 | { sw r11, r0; addi r11, r11, 4 } | ||
217 | { lw r0, r10; addi r10, r10, 4 } | ||
218 | { sw r11, r0; addi r11, r11, 4 } | ||
219 | { lw r0, r10; addi r10, r10, 4 } | ||
220 | { sw r11, r0; addi r11, r11, 4 } | ||
221 | { lw r0, r10; addi r10, r10, 4 } | ||
222 | { sw r11, r0; addi r11, r11, 4 } | ||
223 | { lw r0, r10; addi r10, r10, 4 } | ||
224 | { sw r11, r0; addi r11, r11, 4 } | ||
225 | { lw r0, r10; addi r10, r10, 4 } | ||
226 | { sw r11, r0; addi r11, r11, 4 } | ||
227 | { lw r0, r10; addi r10, r10, 4 } | ||
228 | { sw r11, r0; addi r11, r11, 4 } | ||
229 | { lw r0, r10; addi r10, r10, 4 } | ||
230 | { sw r11, r0; addi r11, r11, 4 } | ||
231 | { lw r0, r10; addi r10, r10, 4 } | ||
232 | { sw r11, r0; addi r11, r11, 4 } | ||
233 | { lw r0, r10; addi r10, r10, 4 } | ||
234 | { sw r11, r0; addi r11, r11, 4 } | ||
235 | { lw r0, r10; addi r10, r10, 4 } | ||
236 | { sw r11, r0; addi r11, r11, 4 } | ||
237 | { lw r0, r10; addi r10, r10, 4 } | ||
238 | { sw r11, r0; addi r11, r11, 4 } | ||
239 | { lw r0, r10; addi r10, r10, 4 } | ||
240 | { sw r11, r0; addi r11, r11, 4 } | ||
241 | { lw r0, r10; addi r10, r10, 4 } | ||
242 | { sw r11, r0; addi r11, r11, 4 } | ||
243 | { lw r0, r10; addi r10, r10, 4 } | ||
244 | { sw r11, r0; addi r11, r11, 4 } | ||
245 | { lw r0, r10; addi r10, r10, 4 } | ||
246 | { sw r11, r0 } | ||
247 | { flush r11 ; addi r11, r11, 4 } | ||
248 | |||
249 | seq r0, r33, r11 | ||
250 | bzt r0, 1b | ||
251 | |||
252 | #ifdef RELOCATE_NEW_KERNEL_VERBOSE | ||
253 | moveli r0, 's' | ||
254 | jalr r40 | ||
255 | #endif | ||
256 | |||
257 | addi r30, r30, 4 | ||
258 | j .Lloop | ||
259 | |||
260 | |||
261 | .Lerr: moveli r0, 'e' | ||
262 | jalr r40 | ||
263 | moveli r0, 'r' | ||
264 | jalr r40 | ||
265 | moveli r0, 'r' | ||
266 | jalr r40 | ||
267 | moveli r0, '\n' | ||
268 | jalr r40 | ||
269 | .Lhalt: | ||
270 | moveli r41, lo16(___hv_halt) | ||
271 | auli r41, r41, ha16(___hv_halt) | ||
272 | |||
273 | jalr r41 | ||
274 | STD_ENDPROC(relocate_new_kernel) | ||
275 | |||
276 | .section .rodata,"a" | ||
277 | |||
278 | .globl relocate_new_kernel_size | ||
279 | relocate_new_kernel_size: | ||
280 | .long .Lend_relocate_new_kernel - relocate_new_kernel | ||
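The .Lloop above walks the standard kexec indirection list: the low four bits of each entry are the tag (IND_DESTINATION = 0x1, IND_INDIRECTION = 0x2, IND_DONE = 0x4, IND_SOURCE = 0x8, as in <linux/kexec.h>), and the remaining bits are a page address. A C rendition of the same walk, offered purely as a reading aid (it treats physical addresses as directly addressable and uses a placeholder page size; the real code must run out of line from a flushed, locally cached page):

#define PAGE_SIZE 4096UL	/* placeholder page size for illustration */

static void walk_kexec_list(unsigned long *list,
			    void (*copy_page_fn)(void *dst, const void *src))
{
	char *dest = 0;

	for (;;) {
		unsigned long entry = *list;
		unsigned long tag = entry & 0xf;
		unsigned long addr = entry & ~0xfUL;

		if (tag == 0x1) {		/* IND_DESTINATION: set output pointer */
			dest = (char *)addr;
			list++;
		} else if (tag == 0x2) {	/* IND_INDIRECTION: switch to new list page */
			list = (unsigned long *)addr;
		} else if (tag == 0x8) {	/* IND_SOURCE: copy one page, advance dest */
			copy_page_fn(dest, (const void *)addr);
			dest += PAGE_SIZE;
			list++;
		} else {			/* IND_DONE (0x4) or unknown: stop walking */
			break;
		}
	}
}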
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c new file mode 100644 index 000000000000..934136b61ceb --- /dev/null +++ b/arch/tile/kernel/setup.c | |||
@@ -0,0 +1,1497 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/mmzone.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/node.h> | ||
21 | #include <linux/cpu.h> | ||
22 | #include <linux/ioport.h> | ||
23 | #include <linux/kexec.h> | ||
24 | #include <linux/pci.h> | ||
25 | #include <linux/initrd.h> | ||
26 | #include <linux/io.h> | ||
27 | #include <linux/highmem.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/timex.h> | ||
30 | #include <asm/setup.h> | ||
31 | #include <asm/sections.h> | ||
33 | #include <asm/cacheflush.h> | ||
35 | #include <asm/pgalloc.h> | ||
36 | #include <asm/mmu_context.h> | ||
37 | #include <hv/hypervisor.h> | ||
38 | #include <arch/interrupts.h> | ||
39 | |||
40 | /* <linux/smp.h> doesn't provide this definition. */ | ||
41 | #ifndef CONFIG_SMP | ||
42 | #define setup_max_cpus 1 | ||
43 | #endif | ||
44 | |||
45 | static inline int ABS(int x) { return x >= 0 ? x : -x; } | ||
46 | |||
47 | /* Chip information */ | ||
48 | char chip_model[64] __write_once; | ||
49 | |||
50 | struct pglist_data node_data[MAX_NUMNODES] __read_mostly; | ||
51 | EXPORT_SYMBOL(node_data); | ||
52 | |||
53 | /* We only create bootmem data on node 0. */ | ||
54 | static bootmem_data_t __initdata node0_bdata; | ||
55 | |||
56 | /* Information on the NUMA nodes that we compute early */ | ||
57 | unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES]; | ||
58 | unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES]; | ||
59 | unsigned long __initdata node_memmap_pfn[MAX_NUMNODES]; | ||
60 | unsigned long __initdata node_percpu_pfn[MAX_NUMNODES]; | ||
61 | unsigned long __initdata node_free_pfn[MAX_NUMNODES]; | ||
62 | |||
63 | #ifdef CONFIG_HIGHMEM | ||
64 | /* Page frame index of end of lowmem on each controller. */ | ||
65 | unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES]; | ||
66 | |||
67 | /* Number of pages that can be mapped into lowmem. */ | ||
68 | static unsigned long __initdata mappable_physpages; | ||
69 | #endif | ||
70 | |||
71 | /* Data on which physical memory controller corresponds to which NUMA node */ | ||
72 | int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 }; | ||
73 | |||
74 | #ifdef CONFIG_HIGHMEM | ||
75 | /* Map information from VAs to PAs */ | ||
76 | unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)] | ||
77 | __write_once __attribute__((aligned(L2_CACHE_BYTES))); | ||
78 | EXPORT_SYMBOL(pbase_map); | ||
79 | |||
80 | /* Map information from PAs to VAs */ | ||
81 | void *vbase_map[NR_PA_HIGHBIT_VALUES] | ||
82 | __write_once __attribute__((aligned(L2_CACHE_BYTES))); | ||
83 | EXPORT_SYMBOL(vbase_map); | ||
84 | #endif | ||
85 | |||
86 | /* Node number as a function of the high PA bits */ | ||
87 | int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once; | ||
88 | EXPORT_SYMBOL(highbits_to_node); | ||
89 | |||
90 | static unsigned int __initdata maxmem_pfn = -1U; | ||
91 | static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = { | ||
92 | [0 ... MAX_NUMNODES-1] = -1U | ||
93 | }; | ||
94 | static nodemask_t __initdata isolnodes; | ||
95 | |||
96 | #ifdef CONFIG_PCI | ||
97 | enum { DEFAULT_PCI_RESERVE_MB = 64 }; | ||
98 | static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB; | ||
99 | unsigned long __initdata pci_reserve_start_pfn = -1U; | ||
100 | unsigned long __initdata pci_reserve_end_pfn = -1U; | ||
101 | #endif | ||
102 | |||
103 | static int __init setup_maxmem(char *str) | ||
104 | { | ||
105 | long maxmem_mb; | ||
106 | if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 || | ||
107 | maxmem_mb == 0) | ||
108 | return -EINVAL; | ||
109 | |||
110 | maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) << | ||
111 | (HPAGE_SHIFT - PAGE_SHIFT); | ||
112 | printk("Forcing RAM used to no more than %dMB\n", | ||
113 | maxmem_pfn >> (20 - PAGE_SHIFT)); | ||
114 | return 0; | ||
115 | } | ||
116 | early_param("maxmem", setup_maxmem); | ||
117 | |||
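The shift arithmetic in setup_maxmem()/setup_maxnodemem() first rounds the requested megabyte count down to whole huge pages and then converts that to small-page frames. A worked example under assumed sizes (PAGE_SHIFT = 12 and HPAGE_SHIFT = 24, i.e. 4 KB small pages and 16 MB huge pages; the real values depend on the kernel configuration):

#include <stdio.h>

int main(void)
{
	/* Assumed, illustrative shift values; not taken from this patch. */
	const int page_shift = 12, hpage_shift = 24;
	long maxmem_mb = 4096;	/* e.g. boot with "maxmem=4096" */

	long maxmem_pfn = (maxmem_mb >> (hpage_shift - 20))	/* 256 huge pages */
			  << (hpage_shift - page_shift);	/* 1048576 frames */

	printf("maxmem_pfn = %ld (%ld MB)\n",
	       maxmem_pfn, maxmem_pfn >> (20 - page_shift));	/* prints 4096 MB */
	return 0;
}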
118 | static int __init setup_maxnodemem(char *str) | ||
119 | { | ||
120 | char *endp; | ||
121 | long maxnodemem_mb, node; | ||
122 | |||
123 | node = str ? simple_strtoul(str, &endp, 0) : INT_MAX; | ||
124 | if (node >= MAX_NUMNODES || *endp != ':' || | ||
125 | strict_strtol(endp+1, 0, &maxnodemem_mb) != 0) | ||
126 | return -EINVAL; | ||
127 | |||
128 | maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) << | ||
129 | (HPAGE_SHIFT - PAGE_SHIFT); | ||
130 | printk("Forcing RAM used on node %ld to no more than %dMB\n", | ||
131 | node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); | ||
132 | return 0; | ||
133 | } | ||
134 | early_param("maxnodemem", setup_maxnodemem); | ||
135 | |||
136 | static int __init setup_isolnodes(char *str) | ||
137 | { | ||
138 | char buf[MAX_NUMNODES * 5]; | ||
139 | if (str == NULL || nodelist_parse(str, isolnodes) != 0) | ||
140 | return -EINVAL; | ||
141 | |||
142 | nodelist_scnprintf(buf, sizeof(buf), isolnodes); | ||
143 | printk("Set isolnodes value to '%s'\n", buf); | ||
144 | return 0; | ||
145 | } | ||
146 | early_param("isolnodes", setup_isolnodes); | ||
147 | |||
148 | #ifdef CONFIG_PCI | ||
149 | static int __init setup_pci_reserve(char *str) | ||
150 | { | ||
151 | unsigned long mb; | ||
152 | |||
153 | if (str == NULL || strict_strtoul(str, 0, &mb) != 0 || | ||
154 | mb > 3 * 1024) | ||
155 | return -EINVAL; | ||
156 | |||
157 | pci_reserve_mb = mb; | ||
158 | printk("Reserving %dMB for PCIE root complex mappings\n", | ||
159 | pci_reserve_mb); | ||
160 | return 0; | ||
161 | } | ||
162 | early_param("pci_reserve", setup_pci_reserve); | ||
163 | #endif | ||
164 | |||
165 | #ifndef __tilegx__ | ||
166 | /* | ||
167 | * vmalloc=size forces the vmalloc area to be exactly 'size' bytes. | ||
168 | * This can be used to increase (or decrease) the vmalloc area. | ||
169 | */ | ||
170 | static int __init parse_vmalloc(char *arg) | ||
171 | { | ||
172 | if (!arg) | ||
173 | return -EINVAL; | ||
174 | |||
175 | VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK; | ||
176 | |||
177 | /* See validate_va() for more on this test. */ | ||
178 | if ((long)_VMALLOC_START >= 0) | ||
179 | early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n", | ||
180 | VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL); | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | early_param("vmalloc", parse_vmalloc); | ||
185 | #endif | ||
186 | |||
187 | #ifdef CONFIG_HIGHMEM | ||
188 | /* | ||
189 | * Determine for each controller where its lowmem is mapped and how | ||
190 | * much of it is mapped there. On controller zero, the first few | ||
191 | * megabytes are mapped at 0xfd000000 as code, so in principle we | ||
192 | * could start our data mappings higher up, but for now we don't | ||
193 | * bother, to avoid additional confusion. | ||
194 | * | ||
195 | * One question is whether, on systems with more than 768 MB and | ||
196 | * controllers of different sizes, to map in a proportionate amount of | ||
197 | * each one, or to try to map the same amount from each controller. | ||
198 | * (E.g. if we have three controllers with 256MB, 1GB, and 256MB | ||
199 | * respectively, do we map 256MB from each, or do we map 128 MB, 512 | ||
200 | * MB, and 128 MB respectively?) For now we use a proportionate | ||
201 | * solution like the latter. | ||
202 | * | ||
203 | * The VA/PA mapping demands that we align our decisions at 16 MB | ||
204 | * boundaries so that we can rapidly convert VA to PA. | ||
205 | */ | ||
206 | static void *__init setup_pa_va_mapping(void) | ||
207 | { | ||
208 | unsigned long curr_pages = 0; | ||
209 | unsigned long vaddr = PAGE_OFFSET; | ||
210 | nodemask_t highonlynodes = isolnodes; | ||
211 | int i, j; | ||
212 | |||
213 | memset(pbase_map, -1, sizeof(pbase_map)); | ||
214 | memset(vbase_map, -1, sizeof(vbase_map)); | ||
215 | |||
216 | /* Node zero cannot be isolated for LOWMEM purposes. */ | ||
217 | node_clear(0, highonlynodes); | ||
218 | |||
219 | /* Count up the number of pages on non-highonlynodes controllers. */ | ||
220 | mappable_physpages = 0; | ||
221 | for_each_online_node(i) { | ||
222 | if (!node_isset(i, highonlynodes)) | ||
223 | mappable_physpages += | ||
224 | node_end_pfn[i] - node_start_pfn[i]; | ||
225 | } | ||
226 | |||
227 | for_each_online_node(i) { | ||
228 | unsigned long start = node_start_pfn[i]; | ||
229 | unsigned long end = node_end_pfn[i]; | ||
230 | unsigned long size = end - start; | ||
231 | unsigned long vaddr_end; | ||
232 | |||
233 | if (node_isset(i, highonlynodes)) { | ||
234 | /* Mark this controller as having no lowmem. */ | ||
235 | node_lowmem_end_pfn[i] = start; | ||
236 | continue; | ||
237 | } | ||
238 | |||
239 | curr_pages += size; | ||
240 | if (mappable_physpages > MAXMEM_PFN) { | ||
241 | vaddr_end = PAGE_OFFSET + | ||
242 | (((u64)curr_pages * MAXMEM_PFN / | ||
243 | mappable_physpages) | ||
244 | << PAGE_SHIFT); | ||
245 | } else { | ||
246 | vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT); | ||
247 | } | ||
248 | for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) { | ||
249 | unsigned long this_pfn = | ||
250 | start + (j << HUGETLB_PAGE_ORDER); | ||
251 | pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn; | ||
252 | if (vbase_map[__pfn_to_highbits(this_pfn)] == | ||
253 | (void *)-1) | ||
254 | vbase_map[__pfn_to_highbits(this_pfn)] = | ||
255 | (void *)(vaddr & HPAGE_MASK); | ||
256 | } | ||
257 | node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER); | ||
258 | BUG_ON(node_lowmem_end_pfn[i] > end); | ||
259 | } | ||
260 | |||
261 | /* Return highest address of any mapped memory. */ | ||
262 | return (void *)vaddr; | ||
263 | } | ||
264 | #endif /* CONFIG_HIGHMEM */ | ||
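To make the proportionate split described in the comment above concrete, here is a standalone sketch (not kernel code): three controllers of 256MB, 1GB and 256MB share a 768MB lowmem budget, with each boundary rounded down to a 16MB multiple. All names and constants are local to the example:

    #include <stdio.h>

    #define MB          (1UL << 20)
    #define ALIGN_16MB  (16 * MB)

    int main(void)
    {
            unsigned long ctrl[] = { 256 * MB, 1024 * MB, 256 * MB };
            unsigned long budget = 768 * MB, total = 0, cum = 0, prev = 0;
            int i;

            for (i = 0; i < 3; i++)
                    total += ctrl[i];
            for (i = 0; i < 3; i++) {
                    unsigned long end;

                    cum += ctrl[i];
                    /* proportional share of the budget, 16MB-aligned */
                    end = (unsigned long)((unsigned long long)cum * budget / total)
                            & ~(ALIGN_16MB - 1);
                    printf("controller %d: %lu MB of lowmem\n", i, (end - prev) / MB);
                    prev = end;
            }
            return 0;   /* prints 128, 512 and 128, as in the comment */
    }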
265 | |||
266 | /* | ||
267 | * Register our most important memory mappings with the debug stub. | ||
268 | * | ||
269 | * This is up to 4 mappings for lowmem, one mapping per memory | ||
270 | * controller, plus one for our text segment. | ||
271 | */ | ||
272 | void __cpuinit store_permanent_mappings(void) | ||
273 | { | ||
274 | int i; | ||
275 | |||
276 | for_each_online_node(i) { | ||
277 | HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT; | ||
278 | #ifdef CONFIG_HIGHMEM | ||
279 | HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i]; | ||
280 | #else | ||
281 | HV_PhysAddr high_mapped_pa = node_end_pfn[i]; | ||
282 | #endif | ||
283 | |||
284 | unsigned long pages = high_mapped_pa - node_start_pfn[i]; | ||
285 | HV_VirtAddr addr = (HV_VirtAddr) __va(pa); | ||
286 | hv_store_mapping(addr, pages << PAGE_SHIFT, pa); | ||
287 | } | ||
288 | |||
289 | hv_store_mapping((HV_VirtAddr)_stext, | ||
290 | (uint32_t)(_einittext - _stext), 0); | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * Use hv_inquire_physical() to populate node_{start,end}_pfn[] | ||
295 | * and node_online_map, doing suitable sanity-checking. | ||
296 | * Also set min_low_pfn, max_low_pfn, and max_pfn. | ||
297 | */ | ||
298 | static void __init setup_memory(void) | ||
299 | { | ||
300 | int i, j; | ||
301 | int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 }; | ||
302 | #ifdef CONFIG_HIGHMEM | ||
303 | long highmem_pages; | ||
304 | #endif | ||
305 | #ifndef __tilegx__ | ||
306 | int cap; | ||
307 | #endif | ||
308 | #if defined(CONFIG_HIGHMEM) || defined(__tilegx__) | ||
309 | long lowmem_pages; | ||
310 | #endif | ||
311 | |||
312 | /* We are using a char to hold the cpu_2_node[] mapping */ | ||
313 | BUG_ON(MAX_NUMNODES > 127); | ||
314 | |||
315 | /* Discover the ranges of memory available to us */ | ||
316 | for (i = 0; ; ++i) { | ||
317 | unsigned long start, size, end, highbits; | ||
318 | HV_PhysAddrRange range = hv_inquire_physical(i); | ||
319 | if (range.size == 0) | ||
320 | break; | ||
321 | #ifdef CONFIG_FLATMEM | ||
322 | if (i > 0) { | ||
323 | printk("Can't use discontiguous PAs: %#llx..%#llx\n", | ||
324 | range.start, range.start + range.size); | ||
325 | continue; | ||
326 | } | ||
327 | #endif | ||
328 | #ifndef __tilegx__ | ||
329 | if ((unsigned long)range.start) { | ||
330 | printk("Range not at 4GB multiple: %#llx..%#llx\n", | ||
331 | range.start, range.start + range.size); | ||
332 | continue; | ||
333 | } | ||
334 | #endif | ||
335 | if ((range.start & (HPAGE_SIZE-1)) != 0 || | ||
336 | (range.size & (HPAGE_SIZE-1)) != 0) { | ||
337 | unsigned long long start_pa = range.start; | ||
338 | unsigned long long size = range.size; | ||
339 | range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK; | ||
340 | range.size -= (range.start - start_pa); | ||
341 | range.size &= HPAGE_MASK; | ||
342 | printk("Range not hugepage-aligned: %#llx..%#llx:" | ||
343 | " now %#llx-%#llx\n", | ||
344 | start_pa, start_pa + size, | ||
345 | range.start, range.start + range.size); | ||
346 | } | ||
347 | highbits = __pa_to_highbits(range.start); | ||
348 | if (highbits >= NR_PA_HIGHBIT_VALUES) { | ||
349 | printk("PA high bits too high: %#llx..%#llx\n", | ||
350 | range.start, range.start + range.size); | ||
351 | continue; | ||
352 | } | ||
353 | if (highbits_seen[highbits]) { | ||
354 | printk("Range overlaps in high bits: %#llx..%#llx\n", | ||
355 | range.start, range.start + range.size); | ||
356 | continue; | ||
357 | } | ||
358 | highbits_seen[highbits] = 1; | ||
359 | if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) { | ||
360 | int size = maxnodemem_pfn[i]; | ||
361 | if (size > 0) { | ||
362 | printk("Maxnodemem reduced node %d to" | ||
363 | " %d pages\n", i, size); | ||
364 | range.size = (HV_PhysAddr)size << PAGE_SHIFT; | ||
365 | } else { | ||
366 | printk("Maxnodemem disabled node %d\n", i); | ||
367 | continue; | ||
368 | } | ||
369 | } | ||
370 | if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) { | ||
371 | int size = maxmem_pfn - num_physpages; | ||
372 | if (size > 0) { | ||
373 | printk("Maxmem reduced node %d to %d pages\n", | ||
374 | i, size); | ||
375 | range.size = (HV_PhysAddr)size << PAGE_SHIFT; | ||
376 | } else { | ||
377 | printk("Maxmem disabled node %d\n", i); | ||
378 | continue; | ||
379 | } | ||
380 | } | ||
381 | if (i >= MAX_NUMNODES) { | ||
382 | printk("Too many PA nodes (#%d): %#llx...%#llx\n", | ||
383 | i, range.start, range.start + range.size); | ||
384 | continue; | ||
385 | } | ||
386 | |||
387 | start = range.start >> PAGE_SHIFT; | ||
388 | size = range.size >> PAGE_SHIFT; | ||
389 | end = start + size; | ||
390 | |||
391 | #ifndef __tilegx__ | ||
392 | if (((HV_PhysAddr)end << PAGE_SHIFT) != | ||
393 | (range.start + range.size)) { | ||
394 | printk("PAs too high to represent: %#llx..%#llx\n", | ||
395 | range.start, range.start + range.size); | ||
396 | continue; | ||
397 | } | ||
398 | #endif | ||
399 | #ifdef CONFIG_PCI | ||
400 | /* | ||
401 | * Blocks that overlap the pci reserved region must | ||
402 | * have enough space to hold the maximum percpu data | ||
403 | * region at the top of the range. If there isn't | ||
404 | * enough space above the reserved region, just | ||
405 | * truncate the node. | ||
406 | */ | ||
407 | if (start <= pci_reserve_start_pfn && | ||
408 | end > pci_reserve_start_pfn) { | ||
409 | unsigned int per_cpu_size = | ||
410 | __per_cpu_end - __per_cpu_start; | ||
411 | unsigned int percpu_pages = | ||
412 | NR_CPUS * PFN_UP(per_cpu_size); | ||
413 | if (end < pci_reserve_end_pfn + percpu_pages) { | ||
414 | end = pci_reserve_start_pfn; | ||
415 | printk("PCI mapping region reduced node %d to" | ||
416 | " %ld pages\n", i, end - start); | ||
417 | } | ||
418 | } | ||
419 | #endif | ||
420 | |||
421 | for (j = __pfn_to_highbits(start); | ||
422 | j <= __pfn_to_highbits(end - 1); j++) | ||
423 | highbits_to_node[j] = i; | ||
424 | |||
425 | node_start_pfn[i] = start; | ||
426 | node_end_pfn[i] = end; | ||
427 | node_controller[i] = range.controller; | ||
428 | num_physpages += size; | ||
429 | max_pfn = end; | ||
430 | |||
431 | /* Mark node as online */ | ||
432 | node_set(i, node_online_map); | ||
433 | node_set(i, node_possible_map); | ||
434 | } | ||
435 | |||
436 | #ifndef __tilegx__ | ||
437 | /* | ||
438 | * For 4KB pages, mem_map "struct page" data is 1% of the size | ||
439 | * of the physical memory, so can be quite big (640 MB for | ||
440 | * four 16G zones). These structures must be mapped in | ||
441 | * lowmem, and since we currently cap out at about 768 MB, | ||
442 | * it's impractical to try to use this much address space. | ||
443 | * For now, arbitrarily cap the amount of physical memory | ||
444 | * we're willing to use at 8 million pages (32GB of 4KB pages). | ||
445 | */ | ||
446 | cap = 8 * 1024 * 1024; /* 8 million pages */ | ||
447 | if (num_physpages > cap) { | ||
448 | int num_nodes = num_online_nodes(); | ||
449 | int cap_each = cap / num_nodes; | ||
450 | unsigned long dropped_pages = 0; | ||
451 | for (i = 0; i < num_nodes; ++i) { | ||
452 | int size = node_end_pfn[i] - node_start_pfn[i]; | ||
453 | if (size > cap_each) { | ||
454 | dropped_pages += (size - cap_each); | ||
455 | node_end_pfn[i] = node_start_pfn[i] + cap_each; | ||
456 | } | ||
457 | } | ||
458 | num_physpages -= dropped_pages; | ||
459 | printk(KERN_WARNING "Only using %ldMB memory;" | ||
460 | " ignoring %ldMB.\n", | ||
461 | num_physpages >> (20 - PAGE_SHIFT), | ||
462 | dropped_pages >> (20 - PAGE_SHIFT)); | ||
463 | printk(KERN_WARNING "Consider using a larger page size.\n"); | ||
464 | } | ||
465 | #endif | ||
466 | |||
467 | /* Heap starts just above the last loaded address. */ | ||
468 | min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET); | ||
469 | |||
470 | #ifdef CONFIG_HIGHMEM | ||
471 | /* Find where we map lowmem from each controller. */ | ||
472 | high_memory = setup_pa_va_mapping(); | ||
473 | |||
474 | /* Set max_low_pfn based on what node 0 can directly address. */ | ||
475 | max_low_pfn = node_lowmem_end_pfn[0]; | ||
476 | |||
477 | lowmem_pages = (mappable_physpages > MAXMEM_PFN) ? | ||
478 | MAXMEM_PFN : mappable_physpages; | ||
479 | highmem_pages = (long) (num_physpages - lowmem_pages); | ||
480 | |||
481 | printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", | ||
482 | pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); | ||
483 | printk(KERN_NOTICE "%ldMB LOWMEM available.\n", | ||
484 | pages_to_mb(lowmem_pages)); | ||
485 | #else | ||
486 | /* Set max_low_pfn based on what node 0 can directly address. */ | ||
487 | max_low_pfn = node_end_pfn[0]; | ||
488 | |||
489 | #ifndef __tilegx__ | ||
490 | if (node_end_pfn[0] > MAXMEM_PFN) { | ||
491 | printk(KERN_WARNING "Only using %ldMB LOWMEM.\n", | ||
492 | MAXMEM>>20); | ||
493 | printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); | ||
494 | max_low_pfn = MAXMEM_PFN; | ||
495 | max_pfn = MAXMEM_PFN; | ||
496 | num_physpages = MAXMEM_PFN; | ||
497 | node_end_pfn[0] = MAXMEM_PFN; | ||
498 | } else { | ||
499 | printk(KERN_NOTICE "%ldMB memory available.\n", | ||
500 | pages_to_mb(node_end_pfn[0])); | ||
501 | } | ||
502 | for (i = 1; i < MAX_NUMNODES; ++i) { | ||
503 | node_start_pfn[i] = 0; | ||
504 | node_end_pfn[i] = 0; | ||
505 | } | ||
506 | high_memory = pfn_to_kaddr(node_end_pfn[0]); | ||
507 | #else | ||
508 | lowmem_pages = 0; | ||
509 | for (i = 0; i < MAX_NUMNODES; ++i) { | ||
510 | int pages = node_end_pfn[i] - node_start_pfn[i]; | ||
511 | lowmem_pages += pages; | ||
512 | if (pages) | ||
513 | high_memory = pfn_to_kaddr(node_end_pfn[i]); | ||
514 | } | ||
515 | printk(KERN_NOTICE "%ldMB memory available.\n", | ||
516 | pages_to_mb(lowmem_pages)); | ||
517 | #endif | ||
518 | #endif | ||
519 | } | ||
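The "1% of physical memory" figure and the 8-million-page cap in the comment inside setup_memory() can be sanity-checked with simple arithmetic. The sketch below is illustrative only; the 40-byte sizeof(struct page) is an assumption, since the real size depends on the kernel configuration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long page_size = 4096, struct_page = 40;  /* assumed */
            unsigned long long mem = 4ULL * 16 << 30;               /* four 16GB zones */
            unsigned long long pages = mem / page_size;
            unsigned long long memmap = pages * struct_page;
            unsigned long long cap = 8ULL * 1024 * 1024 * page_size;

            printf("mem_map: %llu MB (about %.1f%% of RAM)\n",
                   memmap >> 20, 100.0 * memmap / mem);      /* ~640 MB, ~1% */
            printf("cap: %llu GB of 4KB pages\n", cap >> 30); /* 32 GB */
            return 0;
    }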
520 | |||
521 | static void __init setup_bootmem_allocator(void) | ||
522 | { | ||
523 | unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn; | ||
524 | |||
525 | /* Provide a node 0 bdata. */ | ||
526 | NODE_DATA(0)->bdata = &node0_bdata; | ||
527 | |||
528 | #ifdef CONFIG_PCI | ||
529 | /* Don't let boot memory alias the PCI region. */ | ||
530 | last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn); | ||
531 | #else | ||
532 | last_alloc_pfn = max_low_pfn; | ||
533 | #endif | ||
534 | |||
535 | /* | ||
536 | * Initialize the boot-time allocator (with low memory only): | ||
537 | * The first argument says where to put the bitmap, and the | ||
538 | * second says where the end of allocatable memory is. | ||
539 | */ | ||
540 | bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn); | ||
541 | |||
542 | /* | ||
543 | * Let the bootmem allocator use all the space we've given it | ||
544 | * except for its own bitmap. | ||
545 | */ | ||
546 | first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size); | ||
547 | if (first_alloc_pfn >= last_alloc_pfn) | ||
548 | early_panic("Not enough memory on controller 0 for bootmem\n"); | ||
549 | |||
550 | free_bootmem(PFN_PHYS(first_alloc_pfn), | ||
551 | PFN_PHYS(last_alloc_pfn - first_alloc_pfn)); | ||
552 | |||
553 | #ifdef CONFIG_KEXEC | ||
554 | if (crashk_res.start != crashk_res.end) | ||
555 | reserve_bootmem(crashk_res.start, | ||
556 | crashk_res.end - crashk_res.start + 1, 0); | ||
557 | #endif | ||
558 | |||
559 | } | ||
560 | |||
561 | void *__init alloc_remap(int nid, unsigned long size) | ||
562 | { | ||
563 | int pages = node_end_pfn[nid] - node_start_pfn[nid]; | ||
564 | void *map = pfn_to_kaddr(node_memmap_pfn[nid]); | ||
565 | BUG_ON(size != pages * sizeof(struct page)); | ||
566 | memset(map, 0, size); | ||
567 | return map; | ||
568 | } | ||
569 | |||
570 | static int __init percpu_size(void) | ||
571 | { | ||
572 | int size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE); | ||
573 | #ifdef CONFIG_MODULES | ||
574 | if (size < PERCPU_ENOUGH_ROOM) | ||
575 | size = PERCPU_ENOUGH_ROOM; | ||
576 | #endif | ||
577 | /* In several places we assume the per-cpu data fits on a huge page. */ | ||
578 | BUG_ON(kdata_huge && size > HPAGE_SIZE); | ||
579 | return size; | ||
580 | } | ||
581 | |||
582 | static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal) | ||
583 | { | ||
584 | void *kva = __alloc_bootmem(size, PAGE_SIZE, goal); | ||
585 | unsigned long pfn = kaddr_to_pfn(kva); | ||
586 | BUG_ON(goal && PFN_PHYS(pfn) != goal); | ||
587 | return pfn; | ||
588 | } | ||
589 | |||
590 | static void __init zone_sizes_init(void) | ||
591 | { | ||
592 | unsigned long zones_size[MAX_NR_ZONES] = { 0 }; | ||
593 | unsigned long node_percpu[MAX_NUMNODES] = { 0 }; | ||
594 | int size = percpu_size(); | ||
595 | int num_cpus = smp_height * smp_width; | ||
596 | int i; | ||
597 | |||
598 | for (i = 0; i < num_cpus; ++i) | ||
599 | node_percpu[cpu_to_node(i)] += size; | ||
600 | |||
601 | for_each_online_node(i) { | ||
602 | unsigned long start = node_start_pfn[i]; | ||
603 | unsigned long end = node_end_pfn[i]; | ||
604 | #ifdef CONFIG_HIGHMEM | ||
605 | unsigned long lowmem_end = node_lowmem_end_pfn[i]; | ||
606 | #else | ||
607 | unsigned long lowmem_end = end; | ||
608 | #endif | ||
609 | int memmap_size = (end - start) * sizeof(struct page); | ||
610 | node_free_pfn[i] = start; | ||
611 | |||
612 | /* | ||
613 | * Set aside pages for per-cpu data and the mem_map array. | ||
614 | * | ||
615 | * Since the per-cpu data requires special homecaching, | ||
616 | * if we are in kdata_huge mode, we put it at the end of | ||
617 | * the lowmem region. If we're not in kdata_huge mode, | ||
618 | * we take the per-cpu pages from the bottom of the | ||
619 | * controller, since that avoids fragmenting a huge page | ||
620 | * that users might want. We always take the memmap | ||
621 | * from the bottom of the controller, since with | ||
622 | * kdata_huge that lets it be under a huge TLB entry. | ||
623 | * | ||
624 | * If the user has requested isolnodes for a controller, | ||
625 | * though, there'll be no lowmem, so we just alloc_bootmem | ||
626 | * the memmap. There will be no percpu memory either. | ||
627 | */ | ||
628 | if (__pfn_to_highbits(start) == 0) { | ||
629 | /* In low PAs, allocate via bootmem. */ | ||
630 | unsigned long goal = 0; | ||
631 | node_memmap_pfn[i] = | ||
632 | alloc_bootmem_pfn(memmap_size, goal); | ||
633 | if (kdata_huge) | ||
634 | goal = PFN_PHYS(lowmem_end) - node_percpu[i]; | ||
635 | if (node_percpu[i]) | ||
636 | node_percpu_pfn[i] = | ||
637 | alloc_bootmem_pfn(node_percpu[i], goal); | ||
638 | } else if (node_isset(i, isolnodes)) { | ||
639 | node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0); | ||
640 | BUG_ON(node_percpu[i] != 0); | ||
641 | } else { | ||
642 | /* In high PAs, just reserve some pages. */ | ||
643 | node_memmap_pfn[i] = node_free_pfn[i]; | ||
644 | node_free_pfn[i] += PFN_UP(memmap_size); | ||
645 | if (!kdata_huge) { | ||
646 | node_percpu_pfn[i] = node_free_pfn[i]; | ||
647 | node_free_pfn[i] += PFN_UP(node_percpu[i]); | ||
648 | } else { | ||
649 | node_percpu_pfn[i] = | ||
650 | lowmem_end - PFN_UP(node_percpu[i]); | ||
651 | } | ||
652 | } | ||
653 | |||
654 | #ifdef CONFIG_HIGHMEM | ||
655 | if (start > lowmem_end) { | ||
656 | zones_size[ZONE_NORMAL] = 0; | ||
657 | zones_size[ZONE_HIGHMEM] = end - start; | ||
658 | } else { | ||
659 | zones_size[ZONE_NORMAL] = lowmem_end - start; | ||
660 | zones_size[ZONE_HIGHMEM] = end - lowmem_end; | ||
661 | } | ||
662 | #else | ||
663 | zones_size[ZONE_NORMAL] = end - start; | ||
664 | #endif | ||
665 | |||
666 | /* | ||
667 | * Everyone shares node 0's bootmem allocator, but | ||
668 | * we use alloc_remap(), above, to put the actual | ||
669 | * struct page array on the individual controllers, | ||
670 | * which is most of the data that we actually care about. | ||
671 | * We can't place bootmem allocators on the other | ||
672 | * controllers since the bootmem allocator can only | ||
673 | * operate on 32-bit physical addresses. | ||
674 | */ | ||
675 | NODE_DATA(i)->bdata = NODE_DATA(0)->bdata; | ||
676 | |||
677 | free_area_init_node(i, zones_size, start, NULL); | ||
678 | printk(KERN_DEBUG " DMA zone: %ld per-cpu pages\n", | ||
679 | PFN_UP(node_percpu[i])); | ||
680 | |||
681 | /* Track the type of memory on each node */ | ||
682 | if (zones_size[ZONE_NORMAL]) | ||
683 | node_set_state(i, N_NORMAL_MEMORY); | ||
684 | #ifdef CONFIG_HIGHMEM | ||
685 | if (end != start) | ||
686 | node_set_state(i, N_HIGH_MEMORY); | ||
687 | #endif | ||
688 | |||
689 | node_set_online(i); | ||
690 | } | ||
691 | } | ||
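The per-node zone split above is just interval arithmetic. A standalone sketch with made-up pfn values for one node:

    #include <stdio.h>

    int main(void)
    {
            unsigned long start = 0x10000, end = 0x50000, lowmem_end = 0x30000;
            unsigned long normal, highmem;

            if (start > lowmem_end) {           /* node is entirely highmem */
                    normal = 0;
                    highmem = end - start;
            } else {
                    normal = lowmem_end - start;
                    highmem = end - lowmem_end;
            }
            printf("ZONE_NORMAL: %#lx pages, ZONE_HIGHMEM: %#lx pages\n",
                   normal, highmem);
            return 0;
    }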
692 | |||
693 | #ifdef CONFIG_NUMA | ||
694 | |||
695 | /* which logical CPUs are on which nodes */ | ||
696 | struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once; | ||
697 | EXPORT_SYMBOL(node_2_cpu_mask); | ||
698 | |||
699 | /* which node each logical CPU is on */ | ||
700 | char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES))); | ||
701 | EXPORT_SYMBOL(cpu_2_node); | ||
702 | |||
703 | /* Return cpu_to_node() except for cpus not yet assigned, which return -1 */ | ||
704 | static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus) | ||
705 | { | ||
706 | if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus)) | ||
707 | return -1; | ||
708 | else | ||
709 | return cpu_to_node(cpu); | ||
710 | } | ||
711 | |||
712 | /* Return number of immediately-adjacent tiles sharing the same NUMA node. */ | ||
713 | static int __init node_neighbors(int node, int cpu, | ||
714 | struct cpumask *unbound_cpus) | ||
715 | { | ||
716 | int neighbors = 0; | ||
717 | int w = smp_width; | ||
718 | int h = smp_height; | ||
719 | int x = cpu % w; | ||
720 | int y = cpu / w; | ||
721 | if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node) | ||
722 | ++neighbors; | ||
723 | if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node) | ||
724 | ++neighbors; | ||
725 | if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node) | ||
726 | ++neighbors; | ||
727 | if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node) | ||
728 | ++neighbors; | ||
729 | return neighbors; | ||
730 | } | ||
731 | |||
732 | static void __init setup_numa_mapping(void) | ||
733 | { | ||
734 | int distance[MAX_NUMNODES][NR_CPUS]; | ||
735 | HV_Coord coord; | ||
736 | int cpu, node, cpus, i, x, y; | ||
737 | int num_nodes = num_online_nodes(); | ||
738 | struct cpumask unbound_cpus; | ||
739 | nodemask_t default_nodes; | ||
740 | |||
741 | cpumask_clear(&unbound_cpus); | ||
742 | |||
743 | /* Get set of nodes we will use for defaults */ | ||
744 | nodes_andnot(default_nodes, node_online_map, isolnodes); | ||
745 | if (nodes_empty(default_nodes)) { | ||
746 | BUG_ON(!node_isset(0, node_online_map)); | ||
747 | printk("Forcing NUMA node zero available as a default node\n"); | ||
748 | node_set(0, default_nodes); | ||
749 | } | ||
750 | |||
751 | /* Populate the distance[] array */ | ||
752 | memset(distance, -1, sizeof(distance)); | ||
753 | cpu = 0; | ||
754 | for (coord.y = 0; coord.y < smp_height; ++coord.y) { | ||
755 | for (coord.x = 0; coord.x < smp_width; | ||
756 | ++coord.x, ++cpu) { | ||
757 | BUG_ON(cpu >= nr_cpu_ids); | ||
758 | if (!cpu_possible(cpu)) { | ||
759 | cpu_2_node[cpu] = -1; | ||
760 | continue; | ||
761 | } | ||
762 | for_each_node_mask(node, default_nodes) { | ||
763 | HV_MemoryControllerInfo info = | ||
764 | hv_inquire_memory_controller( | ||
765 | coord, node_controller[node]); | ||
766 | distance[node][cpu] = | ||
767 | ABS(info.coord.x) + ABS(info.coord.y); | ||
768 | } | ||
769 | cpumask_set_cpu(cpu, &unbound_cpus); | ||
770 | } | ||
771 | } | ||
772 | cpus = cpu; | ||
773 | |||
774 | /* | ||
775 | * Round-robin through the NUMA nodes until all the cpus are | ||
776 | * assigned. We could be more clever here (e.g. create four | ||
777 | * sorted linked lists on the same set of cpu nodes, and pull | ||
778 | * off them in round-robin sequence, removing from all four | ||
779 | * lists each time) but given the relatively small numbers | ||
780 | * involved, O(n^2) seems OK for a one-time cost. | ||
781 | */ | ||
782 | node = first_node(default_nodes); | ||
783 | while (!cpumask_empty(&unbound_cpus)) { | ||
784 | int best_cpu = -1; | ||
785 | int best_distance = INT_MAX; | ||
786 | for (cpu = 0; cpu < cpus; ++cpu) { | ||
787 | if (cpumask_test_cpu(cpu, &unbound_cpus)) { | ||
788 | /* | ||
789 | * Compute metric, which is how much | ||
790 | * closer the cpu is to this memory | ||
791 | * controller than the others, shifted | ||
792 | * up, and then the number of | ||
793 | * neighbors already in the node as an | ||
794 | * epsilon adjustment to try to keep | ||
795 | * the nodes compact. | ||
796 | */ | ||
797 | int d = distance[node][cpu] * num_nodes; | ||
798 | for_each_node_mask(i, default_nodes) { | ||
799 | if (i != node) | ||
800 | d -= distance[i][cpu]; | ||
801 | } | ||
802 | d *= 8; /* allow space for epsilon */ | ||
803 | d -= node_neighbors(node, cpu, &unbound_cpus); | ||
804 | if (d < best_distance) { | ||
805 | best_cpu = cpu; | ||
806 | best_distance = d; | ||
807 | } | ||
808 | } | ||
809 | } | ||
810 | BUG_ON(best_cpu < 0); | ||
811 | cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]); | ||
812 | cpu_2_node[best_cpu] = node; | ||
813 | cpumask_clear_cpu(best_cpu, &unbound_cpus); | ||
814 | node = next_node(node, default_nodes); | ||
815 | if (node == MAX_NUMNODES) | ||
816 | node = first_node(default_nodes); | ||
817 | } | ||
818 | |||
819 | /* Print out node assignments and set defaults for disabled cpus */ | ||
820 | cpu = 0; | ||
821 | for (y = 0; y < smp_height; ++y) { | ||
822 | printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y); | ||
823 | for (x = 0; x < smp_width; ++x, ++cpu) { | ||
824 | if (cpu_to_node(cpu) < 0) { | ||
825 | printk(" -"); | ||
826 | cpu_2_node[cpu] = first_node(default_nodes); | ||
827 | } else { | ||
828 | printk(" %d", cpu_to_node(cpu)); | ||
829 | } | ||
830 | } | ||
831 | printk("\n"); | ||
832 | } | ||
833 | } | ||
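The assignment metric computed inside the loop above can be worked out by hand for a small case. The sketch below is illustrative only; the hop counts and the neighbor count are invented values:

    #include <stdio.h>

    int main(void)
    {
            int num_nodes = 2;
            int dist[2] = { 3, 7 };     /* hops from this cpu to nodes 0 and 1 */
            int neighbors = 2;          /* adjacent tiles already bound to node 0 */
            int node = 0, i;
            int d = dist[node] * num_nodes;

            for (i = 0; i < num_nodes; i++)
                    if (i != node)
                            d -= dist[i];
            d = d * 8 - neighbors;      /* neighbor count is the epsilon term */
            printf("metric for node %d: %d\n", node, d);  /* (3*2 - 7)*8 - 2 = -10 */
            return 0;                   /* lower is better */
    }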
834 | |||
835 | static struct cpu cpu_devices[NR_CPUS]; | ||
836 | |||
837 | static int __init topology_init(void) | ||
838 | { | ||
839 | int i; | ||
840 | |||
841 | for_each_online_node(i) | ||
842 | register_one_node(i); | ||
843 | |||
844 | for_each_present_cpu(i) | ||
845 | register_cpu(&cpu_devices[i], i); | ||
846 | |||
847 | return 0; | ||
848 | } | ||
849 | |||
850 | subsys_initcall(topology_init); | ||
851 | |||
852 | #else /* !CONFIG_NUMA */ | ||
853 | |||
854 | #define setup_numa_mapping() do { } while (0) | ||
855 | |||
856 | #endif /* CONFIG_NUMA */ | ||
857 | |||
858 | /** | ||
859 | * setup_mpls() - Allow the user-space code to access various SPRs. | ||
860 | * | ||
861 | * Also called from online_secondary(). | ||
862 | */ | ||
863 | void __cpuinit setup_mpls(void) | ||
864 | { | ||
865 | /* Allow asynchronous TLB interrupts. */ | ||
866 | #if CHIP_HAS_TILE_DMA() | ||
867 | raw_local_irq_unmask(INT_DMATLB_MISS); | ||
868 | raw_local_irq_unmask(INT_DMATLB_ACCESS); | ||
869 | #endif | ||
870 | #if CHIP_HAS_SN_PROC() | ||
871 | raw_local_irq_unmask(INT_SNITLB_MISS); | ||
872 | #endif | ||
873 | |||
874 | /* | ||
875 | * Allow user access to many generic SPRs, like the cycle | ||
876 | * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc. | ||
877 | */ | ||
878 | __insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1); | ||
879 | |||
880 | #if CHIP_HAS_SN() | ||
881 | /* Static network is not restricted. */ | ||
882 | __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1); | ||
883 | #endif | ||
884 | #if CHIP_HAS_SN_PROC() | ||
885 | __insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1); | ||
886 | __insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1); | ||
887 | #endif | ||
888 | |||
889 | /* | ||
890 | * Set the MPL for interrupt control 0 to user level. | ||
891 | * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs, | ||
892 | * as well as the PL 0 interrupt mask. | ||
893 | */ | ||
894 | __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1); | ||
895 | } | ||
896 | |||
897 | static int __initdata set_initramfs_file; | ||
898 | static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; | ||
899 | |||
900 | static int __init setup_initramfs_file(char *str) | ||
901 | { | ||
902 | if (str == NULL) | ||
903 | return -EINVAL; | ||
904 | strncpy(initramfs_file, str, sizeof(initramfs_file) - 1); | ||
905 | set_initramfs_file = 1; | ||
906 | |||
907 | return 0; | ||
908 | } | ||
909 | early_param("initramfs_file", setup_initramfs_file); | ||
910 | |||
911 | /* | ||
912 | * We look for an additional "initramfs.cpio.gz" file in the hvfs. | ||
913 | * If there is one, we allocate some memory for it and it will be | ||
914 | * unpacked to the initramfs after any built-in initramfs_data. | ||
915 | */ | ||
916 | static void __init load_hv_initrd(void) | ||
917 | { | ||
918 | HV_FS_StatInfo stat; | ||
919 | int fd, rc; | ||
920 | void *initrd; | ||
921 | |||
922 | fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); | ||
923 | if (fd == HV_ENOENT) { | ||
924 | if (set_initramfs_file) | ||
925 | printk("No such hvfs initramfs file '%s'\n", | ||
926 | initramfs_file); | ||
927 | return; | ||
928 | } | ||
929 | BUG_ON(fd < 0); | ||
930 | stat = hv_fs_fstat(fd); | ||
931 | BUG_ON(stat.size < 0); | ||
932 | if (stat.flags & HV_FS_ISDIR) { | ||
933 | printk("Ignoring hvfs file '%s': it's a directory.\n", | ||
934 | initramfs_file); | ||
935 | return; | ||
936 | } | ||
937 | initrd = alloc_bootmem_pages(stat.size); | ||
938 | rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0); | ||
939 | if (rc != stat.size) { | ||
940 | printk("Error reading %d bytes from hvfs file '%s': %d\n", | ||
941 | stat.size, initramfs_file, rc); | ||
942 | free_bootmem((unsigned long) initrd, stat.size); | ||
943 | return; | ||
944 | } | ||
945 | initrd_start = (unsigned long) initrd; | ||
946 | initrd_end = initrd_start + stat.size; | ||
947 | } | ||
948 | |||
949 | void __init free_initrd_mem(unsigned long begin, unsigned long end) | ||
950 | { | ||
951 | free_bootmem(begin, end - begin); | ||
952 | } | ||
953 | |||
954 | static void __init validate_hv(void) | ||
955 | { | ||
956 | /* | ||
957 | * It may already be too late, but let's check our built-in | ||
958 | * configuration against what the hypervisor is providing. | ||
959 | */ | ||
960 | unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE); | ||
961 | int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL); | ||
962 | int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE); | ||
963 | HV_ASIDRange asid_range; | ||
964 | |||
965 | #ifndef CONFIG_SMP | ||
966 | HV_Topology topology = hv_inquire_topology(); | ||
967 | BUG_ON(topology.coord.x != 0 || topology.coord.y != 0); | ||
968 | if (topology.width != 1 || topology.height != 1) { | ||
969 | printk("Warning: booting UP kernel on %dx%d grid;" | ||
970 | " will ignore all but first tile.\n", | ||
971 | topology.width, topology.height); | ||
972 | } | ||
973 | #endif | ||
974 | |||
975 | if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text) | ||
976 | early_panic("Hypervisor glue size %ld is too big!\n", | ||
977 | glue_size); | ||
978 | if (hv_page_size != PAGE_SIZE) | ||
979 | early_panic("Hypervisor page size %#x != our %#lx\n", | ||
980 | hv_page_size, PAGE_SIZE); | ||
981 | if (hv_hpage_size != HPAGE_SIZE) | ||
982 | early_panic("Hypervisor huge page size %#x != our %#lx\n", | ||
983 | hv_hpage_size, HPAGE_SIZE); | ||
984 | |||
985 | #ifdef CONFIG_SMP | ||
986 | /* | ||
987 | * Some hypervisor APIs take a pointer to a bitmap array | ||
988 | * whose size is at least the number of cpus on the chip. | ||
989 | * We use a struct cpumask for this, so it must be big enough. | ||
990 | */ | ||
991 | if ((smp_height * smp_width) > nr_cpu_ids) | ||
992 | early_panic("Hypervisor %d x %d grid too big for Linux" | ||
993 | " NR_CPUS %d\n", smp_height, smp_width, | ||
994 | nr_cpu_ids); | ||
995 | #endif | ||
996 | |||
997 | /* | ||
998 | * Check that we're using allowed ASIDs, and initialize the | ||
999 | * various asid variables to their appropriate initial states. | ||
1000 | */ | ||
1001 | asid_range = hv_inquire_asid(0); | ||
1002 | __get_cpu_var(current_asid) = min_asid = asid_range.start; | ||
1003 | max_asid = asid_range.start + asid_range.size - 1; | ||
1004 | |||
1005 | if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model, | ||
1006 | sizeof(chip_model)) < 0) { | ||
1007 | printk("Warning: HV_CONFSTR_CHIP_MODEL not available\n"); | ||
1008 | strlcpy(chip_model, "unknown", sizeof(chip_model)); | ||
1009 | } | ||
1010 | } | ||
1011 | |||
1012 | static void __init validate_va(void) | ||
1013 | { | ||
1014 | #ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */ | ||
1015 | /* | ||
1016 | * Similarly, make sure we're only using allowed VAs. | ||
1017 | * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT, | ||
1018 | * and 0 .. KERNEL_HIGH_VADDR. | ||
1019 | * In addition, make sure we CAN'T use the end of memory, since | ||
1020 | * we use the last chunk of each pgd for the pgd_list. | ||
1021 | */ | ||
1022 | int i, fc_fd_ok = 0; | ||
1023 | unsigned long max_va = 0; | ||
1024 | unsigned long list_va = | ||
1025 | ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT); | ||
1026 | |||
1027 | for (i = 0; ; ++i) { | ||
1028 | HV_VirtAddrRange range = hv_inquire_virtual(i); | ||
1029 | if (range.size == 0) | ||
1030 | break; | ||
1031 | if (range.start <= MEM_USER_INTRPT && | ||
1032 | range.start + range.size >= MEM_HV_INTRPT) | ||
1033 | fc_fd_ok = 1; | ||
1034 | if (range.start == 0) | ||
1035 | max_va = range.size; | ||
1036 | BUG_ON(range.start + range.size > list_va); | ||
1037 | } | ||
1038 | if (!fc_fd_ok) | ||
1039 | early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n"); | ||
1040 | if (max_va == 0) | ||
1041 | early_panic("Hypervisor not configured for low VAs\n"); | ||
1042 | if (max_va < KERNEL_HIGH_VADDR) | ||
1043 | early_panic("Hypervisor max VA %#lx smaller than %#lx\n", | ||
1044 | max_va, KERNEL_HIGH_VADDR); | ||
1045 | |||
1046 | /* Kernel PCs must have their high bit set; see intvec.S. */ | ||
1047 | if ((long)VMALLOC_START >= 0) | ||
1048 | early_panic( | ||
1049 | "Linux VMALLOC region below the 2GB line (%#lx)!\n" | ||
1050 | "Reconfigure the kernel with fewer NR_HUGE_VMAPS\n" | ||
1051 | "or smaller VMALLOC_RESERVE.\n", | ||
1052 | VMALLOC_START); | ||
1053 | #endif | ||
1054 | } | ||
1055 | |||
1056 | /* | ||
1057 | * cpu_lotar_map lists all the cpus that are valid for the supervisor | ||
1058 | * to cache data on at a page level, i.e. what cpus can be placed in | ||
1059 | * the LOTAR field of a PTE. It is equivalent to the set of possible | ||
1060 | * cpus plus any other cpus that are willing to share their cache. | ||
1061 | * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR). | ||
1062 | */ | ||
1063 | struct cpumask __write_once cpu_lotar_map; | ||
1064 | EXPORT_SYMBOL(cpu_lotar_map); | ||
1065 | |||
1066 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
1067 | /* | ||
1068 | * hash_for_home_map lists all the tiles that hash-for-home data | ||
1069 | * will be cached on. Note that this may include tiles that are not | ||
1070 | * valid for this supervisor to use otherwise (e.g. if a hypervisor | ||
1071 | * device is being shared between multiple supervisors). | ||
1072 | * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE). | ||
1073 | */ | ||
1074 | struct cpumask hash_for_home_map; | ||
1075 | EXPORT_SYMBOL(hash_for_home_map); | ||
1076 | #endif | ||
1077 | |||
1078 | /* | ||
1079 | * cpu_cacheable_map lists all the cpus whose caches the hypervisor can | ||
1080 | * flush on our behalf. It is set to cpu_possible_map OR'ed with | ||
1081 | * hash_for_home_map, and it is what should be passed to | ||
1082 | * hv_flush_remote() to flush all caches. Note that if there are | ||
1083 | * dedicated hypervisor driver tiles that have authorized use of their | ||
1084 | * cache, those tiles will only appear in cpu_lotar_map, NOT in | ||
1085 | * cpu_cacheable_map, as they are a special case. | ||
1086 | */ | ||
1087 | struct cpumask __write_once cpu_cacheable_map; | ||
1088 | EXPORT_SYMBOL(cpu_cacheable_map); | ||
1089 | |||
1090 | static __initdata struct cpumask disabled_map; | ||
1091 | |||
1092 | static int __init disabled_cpus(char *str) | ||
1093 | { | ||
1094 | int boot_cpu = smp_processor_id(); | ||
1095 | |||
1096 | if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0) | ||
1097 | return -EINVAL; | ||
1098 | if (cpumask_test_cpu(boot_cpu, &disabled_map)) { | ||
1099 | printk("disabled_cpus: can't disable boot cpu %d\n", boot_cpu); | ||
1100 | cpumask_clear_cpu(boot_cpu, &disabled_map); | ||
1101 | } | ||
1102 | return 0; | ||
1103 | } | ||
1104 | |||
1105 | early_param("disabled_cpus", disabled_cpus); | ||
1106 | |||
1107 | void __init print_disabled_cpus(void) | ||
1108 | { | ||
1109 | if (!cpumask_empty(&disabled_map)) { | ||
1110 | char buf[100]; | ||
1111 | cpulist_scnprintf(buf, sizeof(buf), &disabled_map); | ||
1112 | printk(KERN_INFO "CPUs not available for Linux: %s\n", buf); | ||
1113 | } | ||
1114 | } | ||
1115 | |||
1116 | static void __init setup_cpu_maps(void) | ||
1117 | { | ||
1118 | struct cpumask hv_disabled_map, cpu_possible_init; | ||
1119 | int boot_cpu = smp_processor_id(); | ||
1120 | int cpus, i, rc; | ||
1121 | |||
1122 | /* Learn which cpus are allowed by the hypervisor. */ | ||
1123 | rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL, | ||
1124 | (HV_VirtAddr) cpumask_bits(&cpu_possible_init), | ||
1125 | sizeof(cpu_cacheable_map)); | ||
1126 | if (rc < 0) | ||
1127 | early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc); | ||
1128 | if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init)) | ||
1129 | early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu); | ||
1130 | |||
1131 | /* Compute the cpus disabled by the hvconfig file. */ | ||
1132 | cpumask_complement(&hv_disabled_map, &cpu_possible_init); | ||
1133 | |||
1134 | /* Include them with the cpus disabled by "disabled_cpus". */ | ||
1135 | cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map); | ||
1136 | |||
1137 | /* | ||
1138 | * Disable every cpu after "setup_max_cpus". But don't mark | ||
1139 | * as disabled the cpus that are outside of our initial rectangle, | ||
1140 | * since that turns out to be confusing. | ||
1141 | */ | ||
1142 | cpus = 1; /* this cpu */ | ||
1143 | cpumask_set_cpu(boot_cpu, &disabled_map); /* ignore this cpu */ | ||
1144 | for (i = 0; cpus < setup_max_cpus; ++i) | ||
1145 | if (!cpumask_test_cpu(i, &disabled_map)) | ||
1146 | ++cpus; | ||
1147 | for (; i < smp_height * smp_width; ++i) | ||
1148 | cpumask_set_cpu(i, &disabled_map); | ||
1149 | cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */ | ||
1150 | for (i = smp_height * smp_width; i < NR_CPUS; ++i) | ||
1151 | cpumask_clear_cpu(i, &disabled_map); | ||
1152 | |||
1153 | /* | ||
1154 | * Setup cpu_possible map as every cpu allocated to us, minus | ||
1155 | * the results of any "disabled_cpus" settings. | ||
1156 | */ | ||
1157 | cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map); | ||
1158 | init_cpu_possible(&cpu_possible_init); | ||
1159 | |||
1160 | /* Learn which cpus are valid for LOTAR caching. */ | ||
1161 | rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR, | ||
1162 | (HV_VirtAddr) cpumask_bits(&cpu_lotar_map), | ||
1163 | sizeof(cpu_lotar_map)); | ||
1164 | if (rc < 0) { | ||
1165 | printk("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n"); | ||
1166 | cpu_lotar_map = cpu_possible_map; | ||
1167 | } | ||
1168 | |||
1169 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
1170 | /* Retrieve set of CPUs used for hash-for-home caching */ | ||
1171 | rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE, | ||
1172 | (HV_VirtAddr) hash_for_home_map.bits, | ||
1173 | sizeof(hash_for_home_map)); | ||
1174 | if (rc < 0) | ||
1175 | early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc); | ||
1176 | cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map); | ||
1177 | #else | ||
1178 | cpu_cacheable_map = cpu_possible_map; | ||
1179 | #endif | ||
1180 | } | ||
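The setup_max_cpus trimming above walks the grid until enough cpus are enabled and then disables the rest, while keeping the boot cpu out of the count. A standalone sketch of the same loop shape, with a made-up 8-tile grid, maxcpus=4, and tile 2 already disabled by the hypervisor:

    #include <stdio.h>

    int main(void)
    {
            int grid = 8, maxcpus = 4, boot_cpu = 0, i, cpus;
            int disabled[8] = { 0, 0, 1, 0, 0, 0, 0, 0 };

            disabled[boot_cpu] = 1;             /* temporarily ignore the boot cpu */
            for (i = 0, cpus = 1; i < grid && cpus < maxcpus; ++i)
                    if (!disabled[i])
                            ++cpus;
            for (; i < grid; ++i)
                    disabled[i] = 1;            /* trim everything past maxcpus */
            disabled[boot_cpu] = 0;             /* the boot cpu stays online */

            for (i = 0; i < grid; ++i)
                    printf("cpu %d: %s\n", i, disabled[i] ? "disabled" : "online");
            return 0;                           /* 0, 1, 3, 4 online; 2, 5-7 disabled */
    }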
1181 | |||
1182 | |||
1183 | static int __init dataplane(char *str) | ||
1184 | { | ||
1185 | printk("WARNING: dataplane support disabled in this kernel\n"); | ||
1186 | return 0; | ||
1187 | } | ||
1188 | |||
1189 | early_param("dataplane", dataplane); | ||
1190 | |||
1191 | #ifdef CONFIG_CMDLINE_BOOL | ||
1192 | static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; | ||
1193 | #endif | ||
1194 | |||
1195 | void __init setup_arch(char **cmdline_p) | ||
1196 | { | ||
1197 | int len; | ||
1198 | |||
1199 | #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE) | ||
1200 | len = hv_get_command_line((HV_VirtAddr) boot_command_line, | ||
1201 | COMMAND_LINE_SIZE); | ||
1202 | if (boot_command_line[0]) | ||
1203 | printk("WARNING: ignoring dynamic command line \"%s\"\n", | ||
1204 | boot_command_line); | ||
1205 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); | ||
1206 | #else | ||
1207 | char *hv_cmdline; | ||
1208 | #if defined(CONFIG_CMDLINE_BOOL) | ||
1209 | if (builtin_cmdline[0]) { | ||
1210 | int builtin_len = strlcpy(boot_command_line, builtin_cmdline, | ||
1211 | COMMAND_LINE_SIZE); | ||
1212 | if (builtin_len < COMMAND_LINE_SIZE-1) | ||
1213 | boot_command_line[builtin_len++] = ' '; | ||
1214 | hv_cmdline = &boot_command_line[builtin_len]; | ||
1215 | len = COMMAND_LINE_SIZE - builtin_len; | ||
1216 | } else | ||
1217 | #endif | ||
1218 | { | ||
1219 | hv_cmdline = boot_command_line; | ||
1220 | len = COMMAND_LINE_SIZE; | ||
1221 | } | ||
1222 | len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len); | ||
1223 | if (len < 0 || len > COMMAND_LINE_SIZE) | ||
1224 | early_panic("hv_get_command_line failed: %d\n", len); | ||
1225 | #endif | ||
1226 | |||
1227 | *cmdline_p = boot_command_line; | ||
1228 | |||
1229 | /* Set disabled_map and setup_max_cpus very early */ | ||
1230 | parse_early_param(); | ||
1231 | |||
1232 | /* Make sure the kernel is compatible with the hypervisor. */ | ||
1233 | validate_hv(); | ||
1234 | validate_va(); | ||
1235 | |||
1236 | setup_cpu_maps(); | ||
1237 | |||
1238 | |||
1239 | #ifdef CONFIG_PCI | ||
1240 | /* | ||
1241 | * Initialize the PCI structures. This is done before memory | ||
1242 | * setup so that we know whether or not a pci_reserve region | ||
1243 | * is necessary. | ||
1244 | */ | ||
1245 | if (tile_pci_init() == 0) | ||
1246 | pci_reserve_mb = 0; | ||
1247 | |||
1248 | /* PCI systems reserve a region just below 4GB for mapping iomem. */ | ||
1249 | pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT)); | ||
1250 | pci_reserve_start_pfn = pci_reserve_end_pfn - | ||
1251 | (pci_reserve_mb << (20 - PAGE_SHIFT)); | ||
1252 | #endif | ||
1253 | |||
1254 | init_mm.start_code = (unsigned long) _text; | ||
1255 | init_mm.end_code = (unsigned long) _etext; | ||
1256 | init_mm.end_data = (unsigned long) _edata; | ||
1257 | init_mm.brk = (unsigned long) _end; | ||
1258 | |||
1259 | setup_memory(); | ||
1260 | store_permanent_mappings(); | ||
1261 | setup_bootmem_allocator(); | ||
1262 | |||
1263 | /* | ||
1264 | * NOTE: before this point _nobody_ is allowed to allocate | ||
1265 | * any memory using the bootmem allocator. | ||
1266 | */ | ||
1267 | |||
1268 | paging_init(); | ||
1269 | setup_numa_mapping(); | ||
1270 | zone_sizes_init(); | ||
1271 | set_page_homes(); | ||
1272 | setup_mpls(); | ||
1273 | setup_clock(); | ||
1274 | load_hv_initrd(); | ||
1275 | } | ||
1276 | |||
1277 | |||
1278 | /* | ||
1279 | * Set up per-cpu memory. | ||
1280 | */ | ||
1281 | |||
1282 | unsigned long __per_cpu_offset[NR_CPUS] __write_once; | ||
1283 | EXPORT_SYMBOL(__per_cpu_offset); | ||
1284 | |||
1285 | static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 }; | ||
1286 | static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 }; | ||
1287 | |||
1288 | /* | ||
1289 | * As the percpu code allocates pages, we return the pages from the | ||
1290 | * end of the node for the specified cpu. | ||
1291 | */ | ||
1292 | static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) | ||
1293 | { | ||
1294 | int nid = cpu_to_node(cpu); | ||
1295 | unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid]; | ||
1296 | |||
1297 | BUG_ON(size % PAGE_SIZE != 0); | ||
1298 | pfn_offset[nid] += size / PAGE_SIZE; | ||
1299 | if (percpu_pfn[cpu] == 0) | ||
1300 | percpu_pfn[cpu] = pfn; | ||
1301 | return pfn_to_kaddr(pfn); | ||
1302 | } | ||
1303 | |||
1304 | /* | ||
1305 | * Pages reserved for percpu memory are not freeable, and in any case we are | ||
1306 | * on a short path to panic() in setup_per_cpu_area() at this point anyway. | ||
1307 | */ | ||
1308 | static void __init pcpu_fc_free(void *ptr, size_t size) | ||
1309 | { | ||
1310 | } | ||
1311 | |||
1312 | /* | ||
1313 | * Set up vmalloc page tables using bootmem for the percpu code. | ||
1314 | */ | ||
1315 | static void __init pcpu_fc_populate_pte(unsigned long addr) | ||
1316 | { | ||
1317 | pgd_t *pgd; | ||
1318 | pud_t *pud; | ||
1319 | pmd_t *pmd; | ||
1320 | pte_t *pte; | ||
1321 | |||
1322 | BUG_ON(pgd_addr_invalid(addr)); | ||
1323 | |||
1324 | pgd = swapper_pg_dir + pgd_index(addr); | ||
1325 | pud = pud_offset(pgd, addr); | ||
1326 | BUG_ON(!pud_present(*pud)); | ||
1327 | pmd = pmd_offset(pud, addr); | ||
1328 | if (pmd_present(*pmd)) { | ||
1329 | BUG_ON(pmd_huge_page(*pmd)); | ||
1330 | } else { | ||
1331 | pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, | ||
1332 | HV_PAGE_TABLE_ALIGN, 0); | ||
1333 | pmd_populate_kernel(&init_mm, pmd, pte); | ||
1334 | } | ||
1335 | } | ||
1336 | |||
1337 | void __init setup_per_cpu_areas(void) | ||
1338 | { | ||
1339 | struct page *pg; | ||
1340 | unsigned long delta, pfn, lowmem_va; | ||
1341 | unsigned long size = percpu_size(); | ||
1342 | char *ptr; | ||
1343 | int rc, cpu, i; | ||
1344 | |||
1345 | rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc, | ||
1346 | pcpu_fc_free, pcpu_fc_populate_pte); | ||
1347 | if (rc < 0) | ||
1348 | panic("Cannot initialize percpu area (err=%d)", rc); | ||
1349 | |||
1350 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | ||
1351 | for_each_possible_cpu(cpu) { | ||
1352 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; | ||
1353 | |||
1354 | /* finv the copy out of cache so we can change homecache */ | ||
1355 | ptr = pcpu_base_addr + pcpu_unit_offsets[cpu]; | ||
1356 | __finv_buffer(ptr, size); | ||
1357 | pfn = percpu_pfn[cpu]; | ||
1358 | |||
1359 | /* Rewrite the page tables to cache on that cpu */ | ||
1360 | pg = pfn_to_page(pfn); | ||
1361 | for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) { | ||
1362 | |||
1363 | /* Update the vmalloc mapping and page home. */ | ||
1364 | pte_t *ptep = | ||
1365 | virt_to_pte(NULL, (unsigned long)ptr + i); | ||
1366 | pte_t pte = *ptep; | ||
1367 | BUG_ON(pfn != pte_pfn(pte)); | ||
1368 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); | ||
1369 | pte = set_remote_cache_cpu(pte, cpu); | ||
1370 | set_pte(ptep, pte); | ||
1371 | |||
1372 | /* Update the lowmem mapping for consistency. */ | ||
1373 | lowmem_va = (unsigned long)pfn_to_kaddr(pfn); | ||
1374 | ptep = virt_to_pte(NULL, lowmem_va); | ||
1375 | if (pte_huge(*ptep)) { | ||
1376 | printk(KERN_DEBUG "early shatter of huge page" | ||
1377 | " at %#lx\n", lowmem_va); | ||
1378 | shatter_pmd((pmd_t *)ptep); | ||
1379 | ptep = virt_to_pte(NULL, lowmem_va); | ||
1380 | BUG_ON(pte_huge(*ptep)); | ||
1381 | } | ||
1382 | BUG_ON(pfn != pte_pfn(*ptep)); | ||
1383 | set_pte(ptep, pte); | ||
1384 | } | ||
1385 | } | ||
1386 | |||
1387 | /* Set our thread pointer appropriately. */ | ||
1388 | set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]); | ||
1389 | |||
1390 | /* Make sure the finv's have completed. */ | ||
1391 | mb_incoherent(); | ||
1392 | |||
1393 | /* Flush the TLB so we reference it properly from here on out. */ | ||
1394 | local_flush_tlb_all(); | ||
1395 | } | ||
1396 | |||
1397 | static struct resource data_resource = { | ||
1398 | .name = "Kernel data", | ||
1399 | .start = 0, | ||
1400 | .end = 0, | ||
1401 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | ||
1402 | }; | ||
1403 | |||
1404 | static struct resource code_resource = { | ||
1405 | .name = "Kernel code", | ||
1406 | .start = 0, | ||
1407 | .end = 0, | ||
1408 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | ||
1409 | }; | ||
1410 | |||
1411 | /* | ||
1412 | * We reserve all resources above 4GB so that PCI won't try to put | ||
1413 | * mappings above 4GB; the standard allows that for some devices but | ||
1414 | * the probing code truncates values to 32 bits. | ||
1415 | */ | ||
1416 | #ifdef CONFIG_PCI | ||
1417 | static struct resource* __init | ||
1418 | insert_non_bus_resource(void) | ||
1419 | { | ||
1420 | struct resource *res = | ||
1421 | kzalloc(sizeof(struct resource), GFP_ATOMIC); | ||
1422 | res->name = "Non-Bus Physical Address Space"; | ||
1423 | res->start = (1ULL << 32); | ||
1424 | res->end = -1LL; | ||
1425 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | ||
1426 | if (insert_resource(&iomem_resource, res)) { | ||
1427 | kfree(res); | ||
1428 | return NULL; | ||
1429 | } | ||
1430 | return res; | ||
1431 | } | ||
1432 | #endif | ||
1433 | |||
1434 | static struct resource* __init | ||
1435 | insert_ram_resource(u64 start_pfn, u64 end_pfn) | ||
1436 | { | ||
1437 | struct resource *res = | ||
1438 | kzalloc(sizeof(struct resource), GFP_ATOMIC); | ||
1439 | res->name = "System RAM"; | ||
1440 | res->start = start_pfn << PAGE_SHIFT; | ||
1441 | res->end = (end_pfn << PAGE_SHIFT) - 1; | ||
1442 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | ||
1443 | if (insert_resource(&iomem_resource, res)) { | ||
1444 | kfree(res); | ||
1445 | return NULL; | ||
1446 | } | ||
1447 | return res; | ||
1448 | } | ||
1449 | |||
1450 | /* | ||
1451 | * Request address space for all standard resources | ||
1452 | * | ||
1453 | * If the system includes PCI root complex drivers, we need to create | ||
1454 | * a window just below 4GB where PCI BARs can be mapped. | ||
1455 | */ | ||
1456 | static int __init request_standard_resources(void) | ||
1457 | { | ||
1458 | int i; | ||
1459 | enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET }; | ||
1460 | |||
1461 | iomem_resource.end = -1LL; | ||
1462 | #ifdef CONFIG_PCI | ||
1463 | insert_non_bus_resource(); | ||
1464 | #endif | ||
1465 | |||
1466 | for_each_online_node(i) { | ||
1467 | u64 start_pfn = node_start_pfn[i]; | ||
1468 | u64 end_pfn = node_end_pfn[i]; | ||
1469 | |||
1470 | #ifdef CONFIG_PCI | ||
1471 | if (start_pfn <= pci_reserve_start_pfn && | ||
1472 | end_pfn > pci_reserve_start_pfn) { | ||
1473 | if (end_pfn > pci_reserve_end_pfn) | ||
1474 | insert_ram_resource(pci_reserve_end_pfn, | ||
1475 | end_pfn); | ||
1476 | end_pfn = pci_reserve_start_pfn; | ||
1477 | } | ||
1478 | #endif | ||
1479 | insert_ram_resource(start_pfn, end_pfn); | ||
1480 | } | ||
1481 | |||
1482 | code_resource.start = __pa(_text - CODE_DELTA); | ||
1483 | code_resource.end = __pa(_etext - CODE_DELTA)-1; | ||
1484 | data_resource.start = __pa(_sdata); | ||
1485 | data_resource.end = __pa(_end)-1; | ||
1486 | |||
1487 | insert_resource(&iomem_resource, &code_resource); | ||
1488 | insert_resource(&iomem_resource, &data_resource); | ||
1489 | |||
1490 | #ifdef CONFIG_KEXEC | ||
1491 | insert_resource(&iomem_resource, &crashk_res); | ||
1492 | #endif | ||
1493 | |||
1494 | return 0; | ||
1495 | } | ||
1496 | |||
1497 | subsys_initcall(request_standard_resources); | ||
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c new file mode 100644 index 000000000000..7ea85eb85242 --- /dev/null +++ b/arch/tile/kernel/signal.c | |||
@@ -0,0 +1,359 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/sched.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/smp_lock.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/signal.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/wait.h> | ||
24 | #include <linux/unistd.h> | ||
25 | #include <linux/stddef.h> | ||
26 | #include <linux/personality.h> | ||
27 | #include <linux/suspend.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/elf.h> | ||
30 | #include <linux/compat.h> | ||
31 | #include <linux/syscalls.h> | ||
32 | #include <linux/uaccess.h> | ||
33 | #include <asm/processor.h> | ||
34 | #include <asm/ucontext.h> | ||
35 | #include <asm/sigframe.h> | ||
36 | #include <arch/interrupts.h> | ||
37 | |||
38 | #define DEBUG_SIG 0 | ||
39 | |||
40 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | ||
41 | |||
42 | |||
43 | /* Caller before callee in this file; other callee is in assembler */ | ||
44 | void do_signal(struct pt_regs *regs); | ||
45 | |||
46 | int _sys_sigaltstack(const stack_t __user *uss, | ||
47 | stack_t __user *uoss, struct pt_regs *regs) | ||
48 | { | ||
49 | return do_sigaltstack(uss, uoss, regs->sp); | ||
50 | } | ||
51 | |||
52 | |||
53 | /* | ||
54 | * Do a signal return; undo the signal stack. | ||
55 | */ | ||
56 | |||
57 | int restore_sigcontext(struct pt_regs *regs, | ||
58 | struct sigcontext __user *sc, long *pr0) | ||
59 | { | ||
60 | int err = 0; | ||
61 | int i; | ||
62 | |||
63 | /* Always make any pending restarted system calls return -EINTR */ | ||
64 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
65 | |||
66 | for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) | ||
67 | err |= __get_user(((long *)regs)[i], | ||
68 | &((long *)(&sc->regs))[i]); | ||
69 | |||
70 | regs->faultnum = INT_SWINT_1_SIGRETURN; | ||
71 | |||
72 | err |= __get_user(*pr0, &sc->regs.regs[0]); | ||
73 | return err; | ||
74 | } | ||
75 | |||
76 | int _sys_rt_sigreturn(struct pt_regs *regs) | ||
77 | { | ||
78 | struct rt_sigframe __user *frame = | ||
79 | (struct rt_sigframe __user *)(regs->sp); | ||
80 | sigset_t set; | ||
81 | long r0; | ||
82 | |||
83 | if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) | ||
84 | goto badframe; | ||
85 | if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) | ||
86 | goto badframe; | ||
87 | |||
88 | sigdelsetmask(&set, ~_BLOCKABLE); | ||
89 | spin_lock_irq(¤t->sighand->siglock); | ||
90 | current->blocked = set; | ||
91 | recalc_sigpending(); | ||
92 | spin_unlock_irq(¤t->sighand->siglock); | ||
93 | |||
94 | if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) | ||
95 | goto badframe; | ||
96 | |||
97 | if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->sp) == -EFAULT) | ||
98 | goto badframe; | ||
99 | |||
100 | return r0; | ||
101 | |||
102 | badframe: | ||
103 | force_sig(SIGSEGV, current); | ||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * Set up a signal frame. | ||
109 | */ | ||
110 | |||
111 | int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) | ||
112 | { | ||
113 | int i, err = 0; | ||
114 | |||
115 | for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i) | ||
116 | err |= __put_user(((long *)regs)[i], | ||
117 | &((long *)(&sc->regs))[i]); | ||
118 | |||
119 | return err; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * Determine which stack to use.. | ||
124 | */ | ||
125 | static inline void __user *get_sigframe(struct k_sigaction *ka, | ||
126 | struct pt_regs *regs, | ||
127 | size_t frame_size) | ||
128 | { | ||
129 | unsigned long sp; | ||
130 | |||
131 | /* Default to using normal stack */ | ||
132 | sp = regs->sp; | ||
133 | |||
134 | /* | ||
135 | * If we are on the alternate signal stack and would overflow | ||
136 | * it, don't. Return an always-bogus address instead so we | ||
137 | * will die with SIGSEGV. | ||
138 | */ | ||
139 | if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size))) | ||
140 | return (void __user *) -1L; | ||
141 | |||
142 | /* This is the X/Open sanctioned signal stack switching. */ | ||
143 | if (ka->sa.sa_flags & SA_ONSTACK) { | ||
144 | if (sas_ss_flags(sp) == 0) | ||
145 | sp = current->sas_ss_sp + current->sas_ss_size; | ||
146 | } | ||
147 | |||
148 | sp -= frame_size; | ||
149 | /* | ||
150 | * Align the stack pointer according to the TILE ABI, | ||
151 | * i.e. so that on function entry (sp & 15) == 0. | ||
152 | */ | ||
153 | sp &= -16UL; | ||
154 | return (void __user *) sp; | ||
155 | } | ||
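The final "sp &= -16UL" above is what enforces the "(sp & 15) == 0" entry condition mentioned in the comment. A trivial standalone illustration (the starting stack pointer and frame size are arbitrary values):

    #include <stdio.h>

    int main(void)
    {
            unsigned long sp = 0xbffff7eaUL;    /* arbitrary example sp */
            unsigned long frame_size = 0x1d0;   /* arbitrary frame size */

            sp -= frame_size;
            sp &= -16UL;                        /* clear the low four bits */
            printf("frame at %#lx, (sp & 15) == %lu\n", sp, sp & 15);
            return 0;
    }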
156 | |||
157 | static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | ||
158 | sigset_t *set, struct pt_regs *regs) | ||
159 | { | ||
160 | unsigned long restorer; | ||
161 | struct rt_sigframe __user *frame; | ||
162 | int err = 0; | ||
163 | int usig; | ||
164 | |||
165 | frame = get_sigframe(ka, regs, sizeof(*frame)); | ||
166 | |||
167 | if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) | ||
168 | goto give_sigsegv; | ||
169 | |||
170 | usig = current_thread_info()->exec_domain | ||
171 | && current_thread_info()->exec_domain->signal_invmap | ||
172 | && sig < 32 | ||
173 | ? current_thread_info()->exec_domain->signal_invmap[sig] | ||
174 | : sig; | ||
175 | |||
176 | /* Always write at least the signal number for the stack backtracer. */ | ||
177 | if (ka->sa.sa_flags & SA_SIGINFO) { | ||
178 | /* At sigreturn time, restore the callee-save registers too. */ | ||
179 | err |= copy_siginfo_to_user(&frame->info, info); | ||
180 | regs->flags |= PT_FLAGS_RESTORE_REGS; | ||
181 | } else { | ||
182 | err |= __put_user(info->si_signo, &frame->info.si_signo); | ||
183 | } | ||
184 | |||
185 | /* Create the ucontext. */ | ||
186 | err |= __clear_user(&frame->save_area, sizeof(frame->save_area)); | ||
187 | err |= __put_user(0, &frame->uc.uc_flags); | ||
188 | err |= __put_user(0, &frame->uc.uc_link); | ||
189 | err |= __put_user((void *)(current->sas_ss_sp), | ||
190 | &frame->uc.uc_stack.ss_sp); | ||
191 | err |= __put_user(sas_ss_flags(regs->sp), | ||
192 | &frame->uc.uc_stack.ss_flags); | ||
193 | err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size); | ||
194 | err |= setup_sigcontext(&frame->uc.uc_mcontext, regs); | ||
195 | err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | ||
196 | if (err) | ||
197 | goto give_sigsegv; | ||
198 | |||
199 | restorer = VDSO_BASE; | ||
200 | if (ka->sa.sa_flags & SA_RESTORER) | ||
201 | restorer = (unsigned long) ka->sa.sa_restorer; | ||
202 | |||
203 | /* | ||
204 | * Set up registers for signal handler. | ||
205 | * Registers that we don't modify keep the value they had from | ||
206 | * user-space at the time we took the signal. | ||
207 | */ | ||
208 | regs->pc = (unsigned long) ka->sa.sa_handler; | ||
209 | regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ | ||
210 | regs->sp = (unsigned long) frame; | ||
211 | regs->lr = restorer; | ||
212 | regs->regs[0] = (unsigned long) usig; | ||
213 | |||
214 | if (ka->sa.sa_flags & SA_SIGINFO) { | ||
215 | /* Need extra arguments, so mark to restore caller-saves. */ | ||
216 | regs->regs[1] = (unsigned long) &frame->info; | ||
217 | regs->regs[2] = (unsigned long) &frame->uc; | ||
218 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
219 | } | ||
220 | |||
221 | /* | ||
222 | * Notify any tracer that was single-stepping it. | ||
223 | * The tracer may want to single-step inside the | ||
224 | * handler too. | ||
225 | */ | ||
226 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
227 | ptrace_notify(SIGTRAP); | ||
228 | |||
229 | return 0; | ||
230 | |||
231 | give_sigsegv: | ||
232 | force_sigsegv(sig, current); | ||
233 | return -EFAULT; | ||
234 | } | ||
235 | |||
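For reference, a hedged user-space sketch of the handler that ultimately receives the registers set up above (r0 = signal number, r1 = &frame->info, r2 = &frame->uc); none of this is part of the patch:

#include <signal.h>
#include <stdio.h>

/* With SA_SIGINFO, the three arguments arrive exactly where setup_rt_frame()
 * put them: the signal number, the siginfo copy, and the ucontext pointer. */
static void handler(int sig, siginfo_t *info, void *uc)
{
	printf("signal %d, si_code %d, ucontext at %p\n",
	       sig, info->si_code, uc);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler,
				.sa_flags = SA_SIGINFO };
	sigaction(SIGUSR2, &sa, NULL);
	raise(SIGUSR2);
	return 0;
}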
236 | /* | ||
237 | * OK, we're invoking a handler | ||
238 | */ | ||
239 | |||
240 | static int handle_signal(unsigned long sig, siginfo_t *info, | ||
241 | struct k_sigaction *ka, sigset_t *oldset, | ||
242 | struct pt_regs *regs) | ||
243 | { | ||
244 | int ret; | ||
245 | |||
246 | |||
247 | /* Are we from a system call? */ | ||
248 | if (regs->faultnum == INT_SWINT_1) { | ||
249 | /* If so, check system call restarting.. */ | ||
250 | switch (regs->regs[0]) { | ||
251 | case -ERESTART_RESTARTBLOCK: | ||
252 | case -ERESTARTNOHAND: | ||
253 | regs->regs[0] = -EINTR; | ||
254 | break; | ||
255 | |||
256 | case -ERESTARTSYS: | ||
257 | if (!(ka->sa.sa_flags & SA_RESTART)) { | ||
258 | regs->regs[0] = -EINTR; | ||
259 | break; | ||
260 | } | ||
261 | /* fallthrough */ | ||
262 | case -ERESTARTNOINTR: | ||
263 | /* Reload caller-saves to restore r0..r5 and r10. */ | ||
264 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
265 | regs->regs[0] = regs->orig_r0; | ||
266 | regs->pc -= 8; | ||
267 | } | ||
268 | } | ||
269 | |||
270 | /* Set up the stack frame */ | ||
271 | #ifdef CONFIG_COMPAT | ||
272 | if (is_compat_task()) | ||
273 | ret = compat_setup_rt_frame(sig, ka, info, oldset, regs); | ||
274 | else | ||
275 | #endif | ||
276 | ret = setup_rt_frame(sig, ka, info, oldset, regs); | ||
277 | if (ret == 0) { | ||
278 | /* This code is only called from system calls or from | ||
279 | * the work_pending path in the return-to-user code, and | ||
280 | * either way we can re-enable interrupts unconditionally. | ||
281 | */ | ||
282 | spin_lock_irq(¤t->sighand->siglock); | ||
283 | sigorsets(¤t->blocked, | ||
284 | ¤t->blocked, &ka->sa.sa_mask); | ||
285 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
286 | sigaddset(¤t->blocked, sig); | ||
287 | recalc_sigpending(); | ||
288 | spin_unlock_irq(¤t->sighand->siglock); | ||
289 | } | ||
290 | |||
291 | return ret; | ||
292 | } | ||
293 | |||
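The restart logic above can be read as a small policy function; a hedged kernel-context sketch (restart_policy is an illustrative name, and the actual code also reloads the caller-saved registers and backs pc up by one 8-byte bundle so the swint1 is re-issued):

#include <linux/errno.h>

enum restart_action { NO_CHANGE, SEE_EINTR, RESTART_SYSCALL };

/* Decide what the interrupted system call should look like to user space,
 * given its return value and whether the handler was installed with SA_RESTART. */
static enum restart_action restart_policy(long r0, int sa_restart)
{
	switch (r0) {
	case -ERESTART_RESTARTBLOCK:
	case -ERESTARTNOHAND:
		return SEE_EINTR;
	case -ERESTARTSYS:
		return sa_restart ? RESTART_SYSCALL : SEE_EINTR;
	case -ERESTARTNOINTR:
		return RESTART_SYSCALL;
	default:
		return NO_CHANGE;	/* not an interrupted syscall */
	}
}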
294 | /* | ||
295 | * Note that 'init' is a special process: it doesn't get signals it doesn't | ||
296 | * want to handle. Thus you cannot kill init even with a SIGKILL even by | ||
297 | * mistake. | ||
298 | */ | ||
299 | void do_signal(struct pt_regs *regs) | ||
300 | { | ||
301 | siginfo_t info; | ||
302 | int signr; | ||
303 | struct k_sigaction ka; | ||
304 | sigset_t *oldset; | ||
305 | |||
306 | /* | ||
307 | * i386 will check if we're coming from kernel mode and bail out | ||
308 | * here. In my experience this just turns weird crashes into | ||
309 | * weird spin-hangs. But if we find a case where this seems | ||
310 | * helpful, we can reinstate the check on "!user_mode(regs)". | ||
311 | */ | ||
312 | |||
313 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) | ||
314 | oldset = ¤t->saved_sigmask; | ||
315 | else | ||
316 | oldset = ¤t->blocked; | ||
317 | |||
318 | signr = get_signal_to_deliver(&info, &ka, regs, NULL); | ||
319 | if (signr > 0) { | ||
320 | /* Whee! Actually deliver the signal. */ | ||
321 | if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { | ||
322 | /* | ||
323 | * A signal was successfully delivered; the saved | ||
324 | * sigmask will have been stored in the signal frame, | ||
325 | * and will be restored by sigreturn, so we can simply | ||
326 | * clear the TS_RESTORE_SIGMASK flag. | ||
327 | */ | ||
328 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
329 | } | ||
330 | |||
331 | return; | ||
332 | } | ||
333 | |||
334 | /* Did we come from a system call? */ | ||
335 | if (regs->faultnum == INT_SWINT_1) { | ||
336 | /* Restart the system call - no handlers present */ | ||
337 | switch (regs->regs[0]) { | ||
338 | case -ERESTARTNOHAND: | ||
339 | case -ERESTARTSYS: | ||
340 | case -ERESTARTNOINTR: | ||
341 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
342 | regs->regs[0] = regs->orig_r0; | ||
343 | regs->pc -= 8; | ||
344 | break; | ||
345 | |||
346 | case -ERESTART_RESTARTBLOCK: | ||
347 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
348 | regs->regs[TREG_SYSCALL_NR] = __NR_restart_syscall; | ||
349 | regs->pc -= 8; | ||
350 | break; | ||
351 | } | ||
352 | } | ||
353 | |||
354 | /* If there's no signal to deliver, just put the saved sigmask back. */ | ||
355 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) { | ||
356 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | ||
357 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | ||
358 | } | ||
359 | } | ||
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c new file mode 100644 index 000000000000..266aae123632 --- /dev/null +++ b/arch/tile/kernel/single_step.c | |||
@@ -0,0 +1,656 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * A code-rewriter that enables instruction single-stepping. | ||
15 | * Derived from iLib's single-stepping code. | ||
16 | */ | ||
17 | |||
18 | #ifndef __tilegx__ /* No support for single-step yet. */ | ||
19 | |||
20 | /* These functions are only used on the TILE platform */ | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/thread_info.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <linux/mman.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <asm/cacheflush.h> | ||
27 | #include <asm/opcode-tile.h> | ||
28 | #include <asm/opcode_constants.h> | ||
29 | #include <arch/abi.h> | ||
30 | |||
31 | #define signExtend17(val) sign_extend((val), 17) | ||
32 | #define TILE_X1_MASK (0xffffffffULL << 31) | ||
33 | |||
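sign_extend() is not shown in this file; a self-contained sketch of what signExtend17() is assumed to compute (widening a 17-bit two's-complement branch-offset field), for illustration only:

#include <assert.h>
#include <stdint.h>

/* Widen the low n bits of val as a two's-complement quantity. */
static int32_t sign_extend_bits(uint32_t val, int n)
{
	uint32_t sign = 1u << (n - 1);
	return (int32_t)(((val & ((1u << n) - 1)) ^ sign) - sign);
}

int main(void)
{
	assert(sign_extend_bits(0x1ffff, 17) == -1);		/* all ones -> -1 */
	assert(sign_extend_bits(0x0ffff, 17) == 0xffff);	/* sign bit clear */
	return 0;
}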
34 | int unaligned_printk; | ||
35 | |||
36 | static int __init setup_unaligned_printk(char *str) | ||
37 | { | ||
38 | long val; | ||
39 | if (strict_strtol(str, 0, &val) != 0) | ||
40 | return 0; | ||
41 | unaligned_printk = val; | ||
42 | printk("Printk for each unaligned data access is %s\n", | ||
43 | unaligned_printk ? "enabled" : "disabled"); | ||
44 | return 1; | ||
45 | } | ||
46 | __setup("unaligned_printk=", setup_unaligned_printk); | ||
47 | |||
48 | unsigned int unaligned_fixup_count; | ||
49 | |||
50 | enum mem_op { | ||
51 | MEMOP_NONE, | ||
52 | MEMOP_LOAD, | ||
53 | MEMOP_STORE, | ||
54 | MEMOP_LOAD_POSTINCR, | ||
55 | MEMOP_STORE_POSTINCR | ||
56 | }; | ||
57 | |||
58 | static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, int32_t offset) | ||
59 | { | ||
60 | tile_bundle_bits result; | ||
61 | |||
62 | /* mask out the old offset */ | ||
63 | tile_bundle_bits mask = create_BrOff_X1(-1); | ||
64 | result = n & (~mask); | ||
65 | |||
66 | /* or in the new offset */ | ||
67 | result |= create_BrOff_X1(offset); | ||
68 | |||
69 | return result; | ||
70 | } | ||
71 | |||
72 | static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src) | ||
73 | { | ||
74 | tile_bundle_bits result; | ||
75 | tile_bundle_bits op; | ||
76 | |||
77 | result = n & (~TILE_X1_MASK); | ||
78 | |||
79 | op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) | | ||
80 | create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) | | ||
81 | create_Dest_X1(dest) | | ||
82 | create_SrcB_X1(TREG_ZERO) | | ||
83 | create_SrcA_X1(src) ; | ||
84 | |||
85 | result |= op; | ||
86 | return result; | ||
87 | } | ||
88 | |||
89 | static inline tile_bundle_bits nop_X1(tile_bundle_bits n) | ||
90 | { | ||
91 | return move_X1(n, TREG_ZERO, TREG_ZERO); | ||
92 | } | ||
93 | |||
94 | static inline tile_bundle_bits addi_X1( | ||
95 | tile_bundle_bits n, int dest, int src, int imm) | ||
96 | { | ||
97 | n &= ~TILE_X1_MASK; | ||
98 | |||
99 | n |= (create_SrcA_X1(src) | | ||
100 | create_Dest_X1(dest) | | ||
101 | create_Imm8_X1(imm) | | ||
102 | create_S_X1(0) | | ||
103 | create_Opcode_X1(IMM_0_OPCODE_X1) | | ||
104 | create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1)); | ||
105 | |||
106 | return n; | ||
107 | } | ||
108 | |||
109 | static tile_bundle_bits rewrite_load_store_unaligned( | ||
110 | struct single_step_state *state, | ||
111 | tile_bundle_bits bundle, | ||
112 | struct pt_regs *regs, | ||
113 | enum mem_op mem_op, | ||
114 | int size, int sign_ext) | ||
115 | { | ||
116 | unsigned char *addr; | ||
117 | int val_reg, addr_reg, err, val; | ||
118 | |||
119 | /* Get address and value registers */ | ||
120 | if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) { | ||
121 | addr_reg = get_SrcA_Y2(bundle); | ||
122 | val_reg = get_SrcBDest_Y2(bundle); | ||
123 | } else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) { | ||
124 | addr_reg = get_SrcA_X1(bundle); | ||
125 | val_reg = get_Dest_X1(bundle); | ||
126 | } else { | ||
127 | addr_reg = get_SrcA_X1(bundle); | ||
128 | val_reg = get_SrcB_X1(bundle); | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * If registers are not GPRs, don't try to handle it. | ||
133 | * | ||
134 | * FIXME: we could handle non-GPR loads by getting the real value | ||
135 | * from memory, writing it to the single step buffer, using a | ||
136 | * temp_reg to hold a pointer to that memory, then executing that | ||
137 | * instruction and resetting temp_reg. For non-GPR stores, it's a | ||
138 | * little trickier; we could use the single step buffer for that | ||
139 | * too, but we'd have to add some more state bits so that we could | ||
140 | * call back in here to copy that value to the real target. For | ||
141 | * now, we just handle the simple case. | ||
142 | */ | ||
143 | if ((val_reg >= PTREGS_NR_GPRS && | ||
144 | (val_reg != TREG_ZERO || | ||
145 | mem_op == MEMOP_LOAD || | ||
146 | mem_op == MEMOP_LOAD_POSTINCR)) || | ||
147 | addr_reg >= PTREGS_NR_GPRS) | ||
148 | return bundle; | ||
149 | |||
150 | /* If it's aligned, don't handle it specially */ | ||
151 | addr = (void *)regs->regs[addr_reg]; | ||
152 | if (((unsigned long)addr % size) == 0) | ||
153 | return bundle; | ||
154 | |||
155 | #ifndef __LITTLE_ENDIAN | ||
156 | # error We assume little-endian representation with copy_xx_user size 2 here | ||
157 | #endif | ||
158 | /* Handle unaligned load/store */ | ||
159 | if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) { | ||
160 | unsigned short val_16; | ||
161 | switch (size) { | ||
162 | case 2: | ||
163 | err = copy_from_user(&val_16, addr, sizeof(val_16)); | ||
164 | val = sign_ext ? ((short)val_16) : val_16; | ||
165 | break; | ||
166 | case 4: | ||
167 | err = copy_from_user(&val, addr, sizeof(val)); | ||
168 | break; | ||
169 | default: | ||
170 | BUG(); | ||
171 | } | ||
172 | if (err == 0) { | ||
173 | state->update_reg = val_reg; | ||
174 | state->update_value = val; | ||
175 | state->update = 1; | ||
176 | } | ||
177 | } else { | ||
178 | val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg]; | ||
179 | err = copy_to_user(addr, &val, size); | ||
180 | } | ||
181 | |||
182 | if (err) { | ||
183 | siginfo_t info = { | ||
184 | .si_signo = SIGSEGV, | ||
185 | .si_code = SEGV_MAPERR, | ||
186 | .si_addr = (void __user *)addr | ||
187 | }; | ||
188 | force_sig_info(info.si_signo, &info, current); | ||
189 | return (tile_bundle_bits) 0; | ||
190 | } | ||
191 | |||
192 | if (unaligned_fixup == 0) { | ||
193 | siginfo_t info = { | ||
194 | .si_signo = SIGBUS, | ||
195 | .si_code = BUS_ADRALN, | ||
196 | .si_addr = (void __user *)addr | ||
197 | }; | ||
198 | force_sig_info(info.si_signo, &info, current); | ||
199 | return (tile_bundle_bits) 0; | ||
200 | } | ||
201 | |||
202 | if (unaligned_printk || unaligned_fixup_count == 0) { | ||
203 | printk("Process %d/%s: PC %#lx: Fixup of" | ||
204 | " unaligned %s at %#lx.\n", | ||
205 | current->pid, current->comm, regs->pc, | ||
206 | (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) ? | ||
207 | "load" : "store", | ||
208 | (unsigned long)addr); | ||
209 | if (!unaligned_printk) { | ||
210 | printk("\n" | ||
211 | "Unaligned fixups in the kernel will slow your application considerably.\n" | ||
212 | "You can find them by writing \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n" | ||
213 | "which requests the kernel show all unaligned fixups, or writing a \"0\"\n" | ||
214 | "to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n" | ||
215 | "access will become a SIGBUS you can debug. No further warnings will be\n" | ||
216 | "shown so as to avoid additional slowdown, but you can track the number\n" | ||
217 | "of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n" | ||
218 | "Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n" | ||
219 | "\n"); | ||
220 | } | ||
221 | } | ||
222 | ++unaligned_fixup_count; | ||
223 | |||
224 | if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) { | ||
225 | /* Convert the Y2 instruction to a prefetch. */ | ||
226 | bundle &= ~(create_SrcBDest_Y2(-1) | | ||
227 | create_Opcode_Y2(-1)); | ||
228 | bundle |= (create_SrcBDest_Y2(TREG_ZERO) | | ||
229 | create_Opcode_Y2(LW_OPCODE_Y2)); | ||
230 | /* Replace the load postincr with an addi */ | ||
231 | } else if (mem_op == MEMOP_LOAD_POSTINCR) { | ||
232 | bundle = addi_X1(bundle, addr_reg, addr_reg, | ||
233 | get_Imm8_X1(bundle)); | ||
234 | /* Replace the store postincr with an addi */ | ||
235 | } else if (mem_op == MEMOP_STORE_POSTINCR) { | ||
236 | bundle = addi_X1(bundle, addr_reg, addr_reg, | ||
237 | get_Dest_Imm8_X1(bundle)); | ||
238 | } else { | ||
239 | /* Convert the X1 instruction to a nop. */ | ||
240 | bundle &= ~(create_Opcode_X1(-1) | | ||
241 | create_UnShOpcodeExtension_X1(-1) | | ||
242 | create_UnOpcodeExtension_X1(-1)); | ||
243 | bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) | | ||
244 | create_UnShOpcodeExtension_X1( | ||
245 | UN_0_SHUN_0_OPCODE_X1) | | ||
246 | create_UnOpcodeExtension_X1( | ||
247 | NOP_UN_0_SHUN_0_OPCODE_X1)); | ||
248 | } | ||
249 | |||
250 | return bundle; | ||
251 | } | ||
252 | |||
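The fixup above amounts to doing the access byte-wise in the kernel and, for 16-bit loads, choosing sign- versus zero-extension; a user-space analogue (memcpy standing in for copy_from_user):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Read a possibly misaligned 16-bit value and widen it, LH vs. LH_U style. */
static int32_t load16(const void *addr, int sign_ext)
{
	uint16_t v;
	memcpy(&v, addr, sizeof(v));	/* byte-wise copy, no alignment trap */
	return sign_ext ? (int16_t)v : v;
}

int main(void)
{
	unsigned char buf[] = { 0x00, 0xfe, 0xff, 0x00 };	/* little-endian */
	assert(load16(buf + 1, 1) == -2);	/* LH:   sign-extended 0xfffe */
	assert(load16(buf + 1, 0) == 0xfffe);	/* LH_U: zero-extended 0xfffe */
	return 0;
}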
253 | /** | ||
254 | * single_step_once() - entry point when single stepping has been triggered. | ||
255 | * @regs: The machine register state | ||
256 | * | ||
257 | * When we arrive at this routine via a trampoline, the single step | ||
258 | * engine copies the executing bundle to the single step buffer. | ||
259 | * If the instruction is a condition branch, then the target is | ||
260 | * reset to one past the next instruction. If the instruction | ||
261 | * sets the lr, then that is noted. If the instruction is a jump | ||
262 | * or call, then the new target pc is preserved and the current | ||
263 | * bundle instruction set to null. | ||
264 | * | ||
265 | * The necessary post-single-step rewriting information is stored in | ||
266 | * struct single_step_state. We use data segment values because the | ||
267 | * stack will be rewound when we run the rewritten single-stepped | ||
268 | * instruction. | ||
269 | */ | ||
270 | void single_step_once(struct pt_regs *regs) | ||
271 | { | ||
272 | extern tile_bundle_bits __single_step_ill_insn; | ||
273 | extern tile_bundle_bits __single_step_j_insn; | ||
274 | extern tile_bundle_bits __single_step_addli_insn; | ||
275 | extern tile_bundle_bits __single_step_auli_insn; | ||
276 | struct thread_info *info = (void *)current_thread_info(); | ||
277 | struct single_step_state *state = info->step_state; | ||
278 | int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP); | ||
279 | tile_bundle_bits *buffer, *pc; | ||
280 | tile_bundle_bits bundle; | ||
281 | int temp_reg; | ||
282 | int target_reg = TREG_LR; | ||
283 | int err; | ||
284 | enum mem_op mem_op = MEMOP_NONE; | ||
285 | int size = 0, sign_ext = 0; /* happy compiler */ | ||
286 | |||
287 | asm( | ||
288 | " .pushsection .rodata.single_step\n" | ||
289 | " .align 8\n" | ||
290 | " .globl __single_step_ill_insn\n" | ||
291 | "__single_step_ill_insn:\n" | ||
292 | " ill\n" | ||
293 | " .globl __single_step_addli_insn\n" | ||
294 | "__single_step_addli_insn:\n" | ||
295 | " { nop; addli r0, zero, 0 }\n" | ||
296 | " .globl __single_step_auli_insn\n" | ||
297 | "__single_step_auli_insn:\n" | ||
298 | " { nop; auli r0, r0, 0 }\n" | ||
299 | " .globl __single_step_j_insn\n" | ||
300 | "__single_step_j_insn:\n" | ||
301 | " j .\n" | ||
302 | " .popsection\n" | ||
303 | ); | ||
304 | |||
305 | if (state == NULL) { | ||
306 | /* allocate the per-thread single-step state structure */ | ||
307 | state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL); | ||
308 | if (state == NULL) { | ||
309 | printk("Out of kernel memory trying to single-step\n"); | ||
310 | return; | ||
311 | } | ||
312 | |||
313 | /* allocate a cache line of writable, executable memory */ | ||
314 | down_write(¤t->mm->mmap_sem); | ||
315 | buffer = (void *) do_mmap(0, 0, 64, | ||
316 | PROT_EXEC | PROT_READ | PROT_WRITE, | ||
317 | MAP_PRIVATE | MAP_ANONYMOUS, | ||
318 | 0); | ||
319 | up_write(¤t->mm->mmap_sem); | ||
320 | |||
321 | if ((int)buffer < 0 && (int)buffer > -PAGE_SIZE) { | ||
322 | kfree(state); | ||
323 | printk("Out of kernel pages trying to single-step\n"); | ||
324 | return; | ||
325 | } | ||
326 | |||
327 | state->buffer = buffer; | ||
328 | state->is_enabled = 0; | ||
329 | |||
330 | info->step_state = state; | ||
331 | |||
332 | /* Validate our stored instruction patterns */ | ||
333 | BUG_ON(get_Opcode_X1(__single_step_addli_insn) != | ||
334 | ADDLI_OPCODE_X1); | ||
335 | BUG_ON(get_Opcode_X1(__single_step_auli_insn) != | ||
336 | AULI_OPCODE_X1); | ||
337 | BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO); | ||
338 | BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0); | ||
339 | BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0); | ||
340 | } | ||
341 | |||
342 | /* | ||
343 | * If we are returning from a syscall, we still haven't hit the | ||
344 | * "ill" for the swint1 instruction. So back the PC up to be | ||
345 | * pointing at the swint1, but we'll actually return directly | ||
346 | * back to the "ill" so we come back in via SIGILL as if we | ||
347 | * had "executed" the swint1 without ever being in kernel space. | ||
348 | */ | ||
349 | if (regs->faultnum == INT_SWINT_1) | ||
350 | regs->pc -= 8; | ||
351 | |||
352 | pc = (tile_bundle_bits *)(regs->pc); | ||
353 | bundle = pc[0]; | ||
354 | |||
355 | /* We'll follow the instruction with 2 ill op bundles */ | ||
356 | state->orig_pc = (unsigned long) pc; | ||
357 | state->next_pc = (unsigned long)(pc + 1); | ||
358 | state->branch_next_pc = 0; | ||
359 | state->update = 0; | ||
360 | |||
361 | if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) { | ||
362 | /* two wide, check for control flow */ | ||
363 | int opcode = get_Opcode_X1(bundle); | ||
364 | |||
365 | switch (opcode) { | ||
366 | /* branches */ | ||
367 | case BRANCH_OPCODE_X1: | ||
368 | { | ||
369 | int32_t offset = signExtend17(get_BrOff_X1(bundle)); | ||
370 | |||
371 | /* | ||
372 | * For branches, we use a rewriting trick to let the | ||
373 | * hardware evaluate whether the branch is taken or | ||
374 | * untaken. We record the target offset and then | ||
375 | * rewrite the branch instruction to target 1 insn | ||
376 | * ahead if the branch is taken. We then follow the | ||
377 | * rewritten branch with two bundles, each containing | ||
378 | * an "ill" instruction. The supervisor examines the | ||
379 | * pc after the single step code is executed, and if | ||
380 | * the pc is the first ill instruction, then the | ||
381 | * branch (if any) was not taken. If the pc is the | ||
382 | * second ill instruction, then the branch was | ||
383 | * taken. The new pc is computed for these cases, and | ||
384 | * inserted into the registers for the thread. If | ||
385 | * the pc is the start of the single step code, then | ||
386 | * an exception or interrupt was taken before the | ||
387 | * code started processing, and the same "original" | ||
388 | * pc is restored. This change, different from the | ||
389 | * original implementation, has the advantage of | ||
390 | * executing a single user instruction. | ||
391 | */ | ||
392 | state->branch_next_pc = (unsigned long)(pc + offset); | ||
393 | |||
394 | /* rewrite branch offset to go forward one bundle */ | ||
395 | bundle = set_BrOff_X1(bundle, 2); | ||
396 | } | ||
397 | break; | ||
398 | |||
399 | /* jumps */ | ||
400 | case JALB_OPCODE_X1: | ||
401 | case JALF_OPCODE_X1: | ||
402 | state->update = 1; | ||
403 | state->next_pc = | ||
404 | (unsigned long) (pc + get_JOffLong_X1(bundle)); | ||
405 | break; | ||
406 | |||
407 | case JB_OPCODE_X1: | ||
408 | case JF_OPCODE_X1: | ||
409 | state->next_pc = | ||
410 | (unsigned long) (pc + get_JOffLong_X1(bundle)); | ||
411 | bundle = nop_X1(bundle); | ||
412 | break; | ||
413 | |||
414 | case SPECIAL_0_OPCODE_X1: | ||
415 | switch (get_RRROpcodeExtension_X1(bundle)) { | ||
416 | /* jump-register */ | ||
417 | case JALRP_SPECIAL_0_OPCODE_X1: | ||
418 | case JALR_SPECIAL_0_OPCODE_X1: | ||
419 | state->update = 1; | ||
420 | state->next_pc = | ||
421 | regs->regs[get_SrcA_X1(bundle)]; | ||
422 | break; | ||
423 | |||
424 | case JRP_SPECIAL_0_OPCODE_X1: | ||
425 | case JR_SPECIAL_0_OPCODE_X1: | ||
426 | state->next_pc = | ||
427 | regs->regs[get_SrcA_X1(bundle)]; | ||
428 | bundle = nop_X1(bundle); | ||
429 | break; | ||
430 | |||
431 | case LNK_SPECIAL_0_OPCODE_X1: | ||
432 | state->update = 1; | ||
433 | target_reg = get_Dest_X1(bundle); | ||
434 | break; | ||
435 | |||
436 | /* stores */ | ||
437 | case SH_SPECIAL_0_OPCODE_X1: | ||
438 | mem_op = MEMOP_STORE; | ||
439 | size = 2; | ||
440 | break; | ||
441 | |||
442 | case SW_SPECIAL_0_OPCODE_X1: | ||
443 | mem_op = MEMOP_STORE; | ||
444 | size = 4; | ||
445 | break; | ||
446 | } | ||
447 | break; | ||
448 | |||
449 | /* loads and iret */ | ||
450 | case SHUN_0_OPCODE_X1: | ||
451 | if (get_UnShOpcodeExtension_X1(bundle) == | ||
452 | UN_0_SHUN_0_OPCODE_X1) { | ||
453 | switch (get_UnOpcodeExtension_X1(bundle)) { | ||
454 | case LH_UN_0_SHUN_0_OPCODE_X1: | ||
455 | mem_op = MEMOP_LOAD; | ||
456 | size = 2; | ||
457 | sign_ext = 1; | ||
458 | break; | ||
459 | |||
460 | case LH_U_UN_0_SHUN_0_OPCODE_X1: | ||
461 | mem_op = MEMOP_LOAD; | ||
462 | size = 2; | ||
463 | sign_ext = 0; | ||
464 | break; | ||
465 | |||
466 | case LW_UN_0_SHUN_0_OPCODE_X1: | ||
467 | mem_op = MEMOP_LOAD; | ||
468 | size = 4; | ||
469 | break; | ||
470 | |||
471 | case IRET_UN_0_SHUN_0_OPCODE_X1: | ||
472 | { | ||
473 | unsigned long ex0_0 = __insn_mfspr( | ||
474 | SPR_EX_CONTEXT_0_0); | ||
475 | unsigned long ex0_1 = __insn_mfspr( | ||
476 | SPR_EX_CONTEXT_0_1); | ||
477 | /* | ||
478 | * Special-case it if we're iret'ing | ||
479 | * to PL0 again. Otherwise just let | ||
480 | * it run and it will generate SIGILL. | ||
481 | */ | ||
482 | if (EX1_PL(ex0_1) == USER_PL) { | ||
483 | state->next_pc = ex0_0; | ||
484 | regs->ex1 = ex0_1; | ||
485 | bundle = nop_X1(bundle); | ||
486 | } | ||
487 | } | ||
488 | } | ||
489 | } | ||
490 | break; | ||
491 | |||
492 | #if CHIP_HAS_WH64() | ||
493 | /* postincrement operations */ | ||
494 | case IMM_0_OPCODE_X1: | ||
495 | switch (get_ImmOpcodeExtension_X1(bundle)) { | ||
496 | case LWADD_IMM_0_OPCODE_X1: | ||
497 | mem_op = MEMOP_LOAD_POSTINCR; | ||
498 | size = 4; | ||
499 | break; | ||
500 | |||
501 | case LHADD_IMM_0_OPCODE_X1: | ||
502 | mem_op = MEMOP_LOAD_POSTINCR; | ||
503 | size = 2; | ||
504 | sign_ext = 1; | ||
505 | break; | ||
506 | |||
507 | case LHADD_U_IMM_0_OPCODE_X1: | ||
508 | mem_op = MEMOP_LOAD_POSTINCR; | ||
509 | size = 2; | ||
510 | sign_ext = 0; | ||
511 | break; | ||
512 | |||
513 | case SWADD_IMM_0_OPCODE_X1: | ||
514 | mem_op = MEMOP_STORE_POSTINCR; | ||
515 | size = 4; | ||
516 | break; | ||
517 | |||
518 | case SHADD_IMM_0_OPCODE_X1: | ||
519 | mem_op = MEMOP_STORE_POSTINCR; | ||
520 | size = 2; | ||
521 | break; | ||
522 | |||
523 | default: | ||
524 | break; | ||
525 | } | ||
526 | break; | ||
527 | #endif /* CHIP_HAS_WH64() */ | ||
528 | } | ||
529 | |||
530 | if (state->update) { | ||
531 | /* | ||
532 | * Get an available register. We start with a | ||
533 | * bitmask with 1's for available registers. | ||
534 | * We truncate to the low 32 registers since | ||
535 | * we are guaranteed to have set bits in the | ||
536 | * low 32 bits, then use ctz to pick the first. | ||
537 | */ | ||
538 | u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) | | ||
539 | (1ULL << get_SrcA_X0(bundle)) | | ||
540 | (1ULL << get_SrcB_X0(bundle)) | | ||
541 | (1ULL << target_reg)); | ||
542 | temp_reg = __builtin_ctz(mask); | ||
543 | state->update_reg = temp_reg; | ||
544 | state->update_value = regs->regs[temp_reg]; | ||
545 | regs->regs[temp_reg] = (unsigned long) (pc+1); | ||
546 | regs->flags |= PT_FLAGS_RESTORE_REGS; | ||
547 | bundle = move_X1(bundle, target_reg, temp_reg); | ||
548 | } | ||
549 | } else { | ||
550 | int opcode = get_Opcode_Y2(bundle); | ||
551 | |||
552 | switch (opcode) { | ||
553 | /* loads */ | ||
554 | case LH_OPCODE_Y2: | ||
555 | mem_op = MEMOP_LOAD; | ||
556 | size = 2; | ||
557 | sign_ext = 1; | ||
558 | break; | ||
559 | |||
560 | case LH_U_OPCODE_Y2: | ||
561 | mem_op = MEMOP_LOAD; | ||
562 | size = 2; | ||
563 | sign_ext = 0; | ||
564 | break; | ||
565 | |||
566 | case LW_OPCODE_Y2: | ||
567 | mem_op = MEMOP_LOAD; | ||
568 | size = 4; | ||
569 | break; | ||
570 | |||
571 | /* stores */ | ||
572 | case SH_OPCODE_Y2: | ||
573 | mem_op = MEMOP_STORE; | ||
574 | size = 2; | ||
575 | break; | ||
576 | |||
577 | case SW_OPCODE_Y2: | ||
578 | mem_op = MEMOP_STORE; | ||
579 | size = 4; | ||
580 | break; | ||
581 | } | ||
582 | } | ||
583 | |||
584 | /* | ||
585 | * Check if we need to rewrite an unaligned load/store. | ||
586 | * Returning zero is a special value meaning we need to SIGSEGV. | ||
587 | */ | ||
588 | if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) { | ||
589 | bundle = rewrite_load_store_unaligned(state, bundle, regs, | ||
590 | mem_op, size, sign_ext); | ||
591 | if (bundle == 0) | ||
592 | return; | ||
593 | } | ||
594 | |||
595 | /* write the bundle to our execution area */ | ||
596 | buffer = state->buffer; | ||
597 | err = __put_user(bundle, buffer++); | ||
598 | |||
599 | /* | ||
600 | * If we're really single-stepping, we take an INT_ILL after. | ||
601 | * If we're just handling an unaligned access, we can just | ||
602 | * jump directly back to where we were in user code. | ||
603 | */ | ||
604 | if (is_single_step) { | ||
605 | err |= __put_user(__single_step_ill_insn, buffer++); | ||
606 | err |= __put_user(__single_step_ill_insn, buffer++); | ||
607 | } else { | ||
608 | long delta; | ||
609 | |||
610 | if (state->update) { | ||
611 | /* We have some state to update; do it inline */ | ||
612 | int ha16; | ||
613 | bundle = __single_step_addli_insn; | ||
614 | bundle |= create_Dest_X1(state->update_reg); | ||
615 | bundle |= create_Imm16_X1(state->update_value); | ||
616 | err |= __put_user(bundle, buffer++); | ||
617 | bundle = __single_step_auli_insn; | ||
618 | bundle |= create_Dest_X1(state->update_reg); | ||
619 | bundle |= create_SrcA_X1(state->update_reg); | ||
620 | ha16 = (state->update_value + 0x8000) >> 16; | ||
621 | bundle |= create_Imm16_X1(ha16); | ||
622 | err |= __put_user(bundle, buffer++); | ||
623 | state->update = 0; | ||
624 | } | ||
625 | |||
626 | /* End with a jump back to the next instruction */ | ||
627 | delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) - | ||
628 | (unsigned long)buffer) >> | ||
629 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES; | ||
630 | bundle = __single_step_j_insn; | ||
631 | bundle |= create_JOffLong_X1(delta); | ||
632 | err |= __put_user(bundle, buffer++); | ||
633 | } | ||
634 | |||
635 | if (err) { | ||
636 | printk("Fault when writing to single-step buffer\n"); | ||
637 | return; | ||
638 | } | ||
639 | |||
640 | /* | ||
641 | * Flush the buffer. | ||
642 | * We do a local flush only, since this is a thread-specific buffer. | ||
643 | */ | ||
644 | __flush_icache_range((unsigned long) state->buffer, | ||
645 | (unsigned long) buffer); | ||
646 | |||
647 | /* Indicate enabled */ | ||
648 | state->is_enabled = is_single_step; | ||
649 | regs->pc = (unsigned long) state->buffer; | ||
650 | |||
651 | /* Fault immediately if we are coming back from a syscall. */ | ||
652 | if (regs->faultnum == INT_SWINT_1) | ||
653 | regs->pc += 8; | ||
654 | } | ||
655 | |||
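The addli/auli pair written into the buffer above reconstitutes the saved 32-bit register value from a sign-extended low half plus a rounded high half; a stand-alone check of that arithmetic (assuming addli sign-extends its 16-bit immediate, which is what the +0x8000 rounding implies):

#include <assert.h>
#include <stdint.h>

static uint32_t addli_auli(uint32_t value)
{
	int32_t  lo   = (int16_t)(value & 0xffff);	/* what addli contributes */
	uint32_t ha16 = (value + 0x8000) >> 16;		/* what auli shifts up */
	return (ha16 << 16) + lo;
}

int main(void)
{
	assert(addli_auli(0x12349876) == 0x12349876);
	assert(addli_auli(0x00007fff) == 0x00007fff);
	assert(addli_auli(0xdeadbeef) == 0xdeadbeef);
	return 0;
}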
656 | #endif /* !__tilegx__ */ | ||
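The scratch-register selection in single_step_once() (build a mask of registers the bundle does not touch, then ctz picks the lowest free one) also works as a stand-alone program; pick_temp_reg is an illustrative name, and lr is assumed to be register 55 as on TILE:

#include <stdint.h>
#include <stdio.h>

static int pick_temp_reg(int dest, int srca, int srcb, int target)
{
	/* 1-bits mark unused registers; shifts past bit 31 simply fall off
	 * once the complement is truncated to 32 bits, as in the kernel code. */
	uint32_t mask = (uint32_t) ~((1ULL << dest) | (1ULL << srca) |
				     (1ULL << srcb) | (1ULL << target));
	return __builtin_ctz(mask);	/* first register not in use */
}

int main(void)
{
	/* A bundle using r0, r3 and r7, with lr (r55) as the link target. */
	printf("temp reg = %d\n", pick_temp_reg(0, 3, 7, 55));	/* prints 1 */
	return 0;
}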
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c new file mode 100644 index 000000000000..782c1bfa6dfe --- /dev/null +++ b/arch/tile/kernel/smp.c | |||
@@ -0,0 +1,202 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * TILE SMP support routines. | ||
15 | */ | ||
16 | |||
17 | #include <linux/smp.h> | ||
18 | #include <linux/irq.h> | ||
19 | #include <asm/cacheflush.h> | ||
20 | |||
21 | HV_Topology smp_topology __write_once; | ||
22 | |||
23 | |||
24 | /* | ||
25 | * Top-level send_IPI*() functions to send messages to other cpus. | ||
26 | */ | ||
27 | |||
28 | /* Set by smp_send_stop() to avoid recursive panics. */ | ||
29 | static int stopping_cpus; | ||
30 | |||
31 | void send_IPI_single(int cpu, int tag) | ||
32 | { | ||
33 | HV_Recipient recip = { | ||
34 | .y = cpu / smp_width, | ||
35 | .x = cpu % smp_width, | ||
36 | .state = HV_TO_BE_SENT | ||
37 | }; | ||
38 | int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag)); | ||
39 | BUG_ON(rc <= 0); | ||
40 | } | ||
41 | |||
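The cpu-to-coordinate mapping above is a plain row-major split of the linear cpu number across the mesh; for example, with smp_width == 8 (an assumed 8x8 mesh for illustration), cpu 19 lands at x = 19 % 8 = 3, y = 19 / 8 = 2:

/* Row-major cpu -> (x, y) split used by the IPI senders in this file. */
static void cpu_to_xy(int cpu, int width, int *x, int *y)
{
	*x = cpu % width;
	*y = cpu / width;
}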
42 | void send_IPI_many(const struct cpumask *mask, int tag) | ||
43 | { | ||
44 | HV_Recipient recip[NR_CPUS]; | ||
45 | int cpu, sent; | ||
46 | int nrecip = 0; | ||
47 | int my_cpu = smp_processor_id(); | ||
48 | for_each_cpu(cpu, mask) { | ||
49 | HV_Recipient *r; | ||
50 | BUG_ON(cpu == my_cpu); | ||
51 | r = &recip[nrecip++]; | ||
52 | r->y = cpu / smp_width; | ||
53 | r->x = cpu % smp_width; | ||
54 | r->state = HV_TO_BE_SENT; | ||
55 | } | ||
56 | sent = 0; | ||
57 | while (sent < nrecip) { | ||
58 | int rc = hv_send_message(recip, nrecip, | ||
59 | (HV_VirtAddr)&tag, sizeof(tag)); | ||
60 | if (rc <= 0) { | ||
61 | if (!stopping_cpus) /* avoid recursive panic */ | ||
62 | panic("hv_send_message returned %d", rc); | ||
63 | break; | ||
64 | } | ||
65 | sent += rc; | ||
66 | } | ||
67 | } | ||
68 | |||
69 | void send_IPI_allbutself(int tag) | ||
70 | { | ||
71 | struct cpumask mask; | ||
72 | cpumask_copy(&mask, cpu_online_mask); | ||
73 | cpumask_clear_cpu(smp_processor_id(), &mask); | ||
74 | send_IPI_many(&mask, tag); | ||
75 | } | ||
76 | |||
77 | |||
78 | /* | ||
79 | * Provide smp_call_function_mask, but also run function locally | ||
80 | * if specified in the mask. | ||
81 | */ | ||
82 | void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *), | ||
83 | void *info, bool wait) | ||
84 | { | ||
85 | int cpu = get_cpu(); | ||
86 | smp_call_function_many(mask, func, info, wait); | ||
87 | if (cpumask_test_cpu(cpu, mask)) { | ||
88 | local_irq_disable(); | ||
89 | func(info); | ||
90 | local_irq_enable(); | ||
91 | } | ||
92 | put_cpu(); | ||
93 | } | ||
94 | |||
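A hedged kernel-context usage sketch of the helper above (say_hello and greet_all_online are illustrative names): the callback runs on every cpu in the mask, including the caller, whose own invocation happens with interrupts disabled:

#include <linux/kernel.h>
#include <linux/smp.h>

static void say_hello(void *info)
{
	printk(KERN_DEBUG "callback on cpu %d\n", smp_processor_id());
}

static void greet_all_online(void)
{
	on_each_cpu_mask(cpu_online_mask, say_hello, NULL, 1);
}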
95 | |||
96 | /* | ||
97 | * Functions related to starting/stopping cpus. | ||
98 | */ | ||
99 | |||
100 | /* Handler to start the current cpu. */ | ||
101 | static void smp_start_cpu_interrupt(void) | ||
102 | { | ||
103 | extern unsigned long start_cpu_function_addr; | ||
104 | get_irq_regs()->pc = start_cpu_function_addr; | ||
105 | } | ||
106 | |||
107 | /* Handler to stop the current cpu. */ | ||
108 | static void smp_stop_cpu_interrupt(void) | ||
109 | { | ||
110 | set_cpu_online(smp_processor_id(), 0); | ||
111 | raw_local_irq_disable_all(); | ||
112 | for (;;) | ||
113 | asm("nap"); | ||
114 | } | ||
115 | |||
116 | /* This function calls the 'stop' function on all other CPUs in the system. */ | ||
117 | void smp_send_stop(void) | ||
118 | { | ||
119 | stopping_cpus = 1; | ||
120 | send_IPI_allbutself(MSG_TAG_STOP_CPU); | ||
121 | } | ||
122 | |||
123 | |||
124 | /* | ||
125 | * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages. | ||
126 | */ | ||
127 | void evaluate_message(int tag) | ||
128 | { | ||
129 | switch (tag) { | ||
130 | case MSG_TAG_START_CPU: /* Start up a cpu */ | ||
131 | smp_start_cpu_interrupt(); | ||
132 | break; | ||
133 | |||
134 | case MSG_TAG_STOP_CPU: /* Sent to shut down slave CPUs */ | ||
135 | smp_stop_cpu_interrupt(); | ||
136 | break; | ||
137 | |||
138 | case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */ | ||
139 | generic_smp_call_function_interrupt(); | ||
140 | break; | ||
141 | |||
142 | case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */ | ||
143 | generic_smp_call_function_single_interrupt(); | ||
144 | break; | ||
145 | |||
146 | default: | ||
147 | panic("Unknown IPI message tag %d", tag); | ||
148 | break; | ||
149 | } | ||
150 | } | ||
151 | |||
152 | |||
153 | /* | ||
154 | * flush_icache_range() code uses smp_call_function(). | ||
155 | */ | ||
156 | |||
157 | struct ipi_flush { | ||
158 | unsigned long start; | ||
159 | unsigned long end; | ||
160 | }; | ||
161 | |||
162 | static void ipi_flush_icache_range(void *info) | ||
163 | { | ||
164 | struct ipi_flush *flush = (struct ipi_flush *) info; | ||
165 | __flush_icache_range(flush->start, flush->end); | ||
166 | } | ||
167 | |||
168 | void flush_icache_range(unsigned long start, unsigned long end) | ||
169 | { | ||
170 | struct ipi_flush flush = { start, end }; | ||
171 | preempt_disable(); | ||
172 | on_each_cpu(ipi_flush_icache_range, &flush, 1); | ||
173 | preempt_enable(); | ||
174 | } | ||
175 | |||
176 | |||
177 | /* | ||
178 | * The smp_send_reschedule() path does not use the hv_message_intr() | ||
179 | * path but instead the faster tile_dev_intr() path for interrupts. | ||
180 | */ | ||
181 | |||
182 | irqreturn_t handle_reschedule_ipi(int irq, void *token) | ||
183 | { | ||
184 | /* | ||
185 | * Nothing to do here; when we return from interrupt, the | ||
186 | * rescheduling will occur there. But do bump the interrupt | ||
187 | * profiler count in the meantime. | ||
188 | */ | ||
189 | __get_cpu_var(irq_stat).irq_resched_count++; | ||
190 | |||
191 | return IRQ_HANDLED; | ||
192 | } | ||
193 | |||
194 | void smp_send_reschedule(int cpu) | ||
195 | { | ||
196 | HV_Coord coord; | ||
197 | |||
198 | WARN_ON(cpu_is_offline(cpu)); | ||
199 | coord.y = cpu / smp_width; | ||
200 | coord.x = cpu % smp_width; | ||
201 | hv_trigger_ipi(coord, IRQ_RESCHEDULE); | ||
202 | } | ||
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c new file mode 100644 index 000000000000..aa3aafdb4b93 --- /dev/null +++ b/arch/tile/kernel/smpboot.c | |||
@@ -0,0 +1,293 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/kernel_stat.h> | ||
21 | #include <linux/smp_lock.h> | ||
22 | #include <linux/bootmem.h> | ||
23 | #include <linux/notifier.h> | ||
24 | #include <linux/cpu.h> | ||
25 | #include <linux/percpu.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/err.h> | ||
28 | #include <asm/mmu_context.h> | ||
29 | #include <asm/tlbflush.h> | ||
30 | #include <asm/sections.h> | ||
31 | |||
32 | /* | ||
33 | * This assembly function is provided in entry.S. | ||
34 | * When called, it loops on a nap instruction forever. | ||
35 | * FIXME: should be in a header somewhere. | ||
36 | */ | ||
37 | extern void smp_nap(void); | ||
38 | |||
39 | /* State of each CPU. */ | ||
40 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | ||
41 | |||
42 | /* The messaging code jumps to this pointer during boot-up */ | ||
43 | unsigned long start_cpu_function_addr; | ||
44 | |||
45 | /* Called very early during startup to mark boot cpu as online */ | ||
46 | void __init smp_prepare_boot_cpu(void) | ||
47 | { | ||
48 | int cpu = smp_processor_id(); | ||
49 | set_cpu_online(cpu, 1); | ||
50 | set_cpu_present(cpu, 1); | ||
51 | __get_cpu_var(cpu_state) = CPU_ONLINE; | ||
52 | |||
53 | init_messaging(); | ||
54 | } | ||
55 | |||
56 | static void start_secondary(void); | ||
57 | |||
58 | /* | ||
59 | * Called at the top of init() to launch all the other CPUs. | ||
60 | * They run free to complete their initialization and then wait | ||
61 | * until they get an IPI from the boot cpu to come online. | ||
62 | */ | ||
63 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
64 | { | ||
65 | long rc; | ||
66 | int cpu, cpu_count; | ||
67 | int boot_cpu = smp_processor_id(); | ||
68 | |||
69 | current_thread_info()->cpu = boot_cpu; | ||
70 | |||
71 | /* | ||
72 | * Pin this task to the boot CPU while we bring up the others, | ||
73 | * just to make sure we don't uselessly migrate as they come up. | ||
74 | */ | ||
75 | rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu)); | ||
76 | if (rc != 0) | ||
77 | printk("Couldn't set init affinity to boot cpu (%ld)\n", rc); | ||
78 | |||
79 | /* Print information about disabled and dataplane cpus. */ | ||
80 | print_disabled_cpus(); | ||
81 | |||
82 | /* | ||
83 | * Tell the messaging subsystem how to respond to the | ||
84 | * startup message. We use a level of indirection to avoid | ||
85 | * confusing the linker with the fact that the messaging | ||
86 | * subsystem is calling __init code. | ||
87 | */ | ||
88 | start_cpu_function_addr = (unsigned long) &online_secondary; | ||
89 | |||
90 | /* Set up thread context for all new processors. */ | ||
91 | cpu_count = 1; | ||
92 | for (cpu = 0; cpu < NR_CPUS; ++cpu) { | ||
93 | struct task_struct *idle; | ||
94 | |||
95 | if (cpu == boot_cpu) | ||
96 | continue; | ||
97 | |||
98 | if (!cpu_possible(cpu)) { | ||
99 | /* | ||
100 | * Make this processor do nothing on boot. | ||
101 | * Note that we don't give the boot_pc function | ||
102 | * a stack, so it has to be assembly code. | ||
103 | */ | ||
104 | per_cpu(boot_sp, cpu) = 0; | ||
105 | per_cpu(boot_pc, cpu) = (unsigned long) smp_nap; | ||
106 | continue; | ||
107 | } | ||
108 | |||
109 | /* Create a new idle thread to run start_secondary() */ | ||
110 | idle = fork_idle(cpu); | ||
111 | if (IS_ERR(idle)) | ||
112 | panic("failed fork for CPU %d", cpu); | ||
113 | idle->thread.pc = (unsigned long) start_secondary; | ||
114 | |||
115 | /* Make this thread the boot thread for this processor */ | ||
116 | per_cpu(boot_sp, cpu) = task_ksp0(idle); | ||
117 | per_cpu(boot_pc, cpu) = idle->thread.pc; | ||
118 | |||
119 | ++cpu_count; | ||
120 | } | ||
121 | BUG_ON(cpu_count > (max_cpus ? max_cpus : 1)); | ||
122 | |||
123 | /* Fire up the other tiles, if any */ | ||
124 | init_cpu_present(cpu_possible_mask); | ||
125 | if (cpumask_weight(cpu_present_mask) > 1) { | ||
126 | mb(); /* make sure all data is visible to new processors */ | ||
127 | hv_start_all_tiles(); | ||
128 | } | ||
129 | } | ||
130 | |||
131 | static __initdata struct cpumask init_affinity; | ||
132 | |||
133 | static __init int reset_init_affinity(void) | ||
134 | { | ||
135 | long rc = sched_setaffinity(current->pid, &init_affinity); | ||
136 | if (rc != 0) | ||
137 | printk(KERN_WARNING "couldn't reset init affinity (%ld)\n", | ||
138 | rc); | ||
139 | return 0; | ||
140 | } | ||
141 | late_initcall(reset_init_affinity); | ||
142 | |||
143 | struct cpumask cpu_started __cpuinitdata; | ||
144 | |||
145 | /* | ||
146 | * Activate a secondary processor. Very minimal; don't add anything | ||
147 | * to this path without knowing what you're doing, since SMP booting | ||
148 | * is pretty fragile. | ||
149 | */ | ||
150 | static void __cpuinit start_secondary(void) | ||
151 | { | ||
152 | int cpuid = smp_processor_id(); | ||
153 | |||
154 | /* Set our thread pointer appropriately. */ | ||
155 | set_my_cpu_offset(__per_cpu_offset[cpuid]); | ||
156 | |||
157 | preempt_disable(); | ||
158 | |||
159 | /* | ||
160 | * In large machines even this will slow us down, since we | ||
161 | * will be contending for the printk spinlock. | ||
162 | */ | ||
163 | /* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */ | ||
164 | |||
165 | /* Initialize the current asid for our first page table. */ | ||
166 | __get_cpu_var(current_asid) = min_asid; | ||
167 | |||
168 | /* Set up this thread as another owner of the init_mm */ | ||
169 | atomic_inc(&init_mm.mm_count); | ||
170 | current->active_mm = &init_mm; | ||
171 | if (current->mm) | ||
172 | BUG(); | ||
173 | enter_lazy_tlb(&init_mm, current); | ||
174 | |||
175 | /* Enable IRQs. */ | ||
176 | init_per_tile_IRQs(); | ||
177 | |||
178 | /* Allow hypervisor messages to be received */ | ||
179 | init_messaging(); | ||
180 | local_irq_enable(); | ||
181 | |||
182 | /* Indicate that we're ready to come up. */ | ||
183 | /* Must not do this before we're ready to receive messages */ | ||
184 | if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) { | ||
185 | printk(KERN_WARNING "CPU#%d already started!\n", cpuid); | ||
186 | for (;;) | ||
187 | local_irq_enable(); | ||
188 | } | ||
189 | |||
190 | smp_nap(); | ||
191 | } | ||
192 | |||
193 | void setup_mpls(void); /* from kernel/setup.c */ | ||
194 | void store_permanent_mappings(void); | ||
195 | |||
196 | /* | ||
197 | * Bring a secondary processor online. | ||
198 | */ | ||
199 | void __cpuinit online_secondary() | ||
200 | { | ||
201 | /* | ||
202 | * low-memory mappings have been cleared, flush them from | ||
203 | * the local TLBs too. | ||
204 | */ | ||
205 | local_flush_tlb(); | ||
206 | |||
207 | BUG_ON(in_interrupt()); | ||
208 | |||
209 | /* This must be done before setting cpu_online_mask */ | ||
210 | wmb(); | ||
211 | |||
212 | /* | ||
213 | * We need to hold call_lock, so there is no inconsistency | ||
214 | * between the time smp_call_function() determines number of | ||
215 | * IPI recipients, and the time when the determination is made | ||
216 | * for which cpus receive the IPI. Holding this | ||
217 | * lock helps us to not include this cpu in a currently in progress | ||
218 | * smp_call_function(). | ||
219 | */ | ||
220 | ipi_call_lock(); | ||
221 | set_cpu_online(smp_processor_id(), 1); | ||
222 | ipi_call_unlock(); | ||
223 | __get_cpu_var(cpu_state) = CPU_ONLINE; | ||
224 | |||
225 | /* Set up MPLs for this processor */ | ||
226 | setup_mpls(); | ||
227 | |||
228 | |||
229 | /* Set up tile-timer clock-event device on this cpu */ | ||
230 | setup_tile_timer(); | ||
231 | |||
232 | preempt_enable(); | ||
233 | |||
234 | store_permanent_mappings(); | ||
235 | |||
236 | cpu_idle(); | ||
237 | } | ||
238 | |||
239 | int __cpuinit __cpu_up(unsigned int cpu) | ||
240 | { | ||
241 | /* Wait 5s total for all CPUs to come online */ | ||
242 | static int timeout; | ||
243 | for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) { | ||
244 | if (timeout >= 50000) { | ||
245 | printk(KERN_INFO "skipping unresponsive cpu%d\n", cpu); | ||
246 | local_irq_enable(); | ||
247 | return -EIO; | ||
248 | } | ||
249 | udelay(100); | ||
250 | } | ||
251 | |||
252 | local_irq_enable(); | ||
253 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | ||
254 | |||
255 | /* Unleash the CPU! */ | ||
256 | send_IPI_single(cpu, MSG_TAG_START_CPU); | ||
257 | while (!cpumask_test_cpu(cpu, cpu_online_mask)) | ||
258 | cpu_relax(); | ||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | static void panic_start_cpu(void) | ||
263 | { | ||
264 | panic("Received a MSG_START_CPU IPI after boot finished."); | ||
265 | } | ||
266 | |||
267 | void __init smp_cpus_done(unsigned int max_cpus) | ||
268 | { | ||
269 | int cpu, next, rc; | ||
270 | |||
271 | /* Reset the response to a (now illegal) MSG_START_CPU IPI. */ | ||
272 | start_cpu_function_addr = (unsigned long) &panic_start_cpu; | ||
273 | |||
274 | cpumask_copy(&init_affinity, cpu_online_mask); | ||
275 | |||
276 | /* | ||
277 | * Pin ourselves to a single cpu in the initial affinity set | ||
278 | * so that kernel mappings for the rootfs are not in the dataplane, | ||
279 | * if set, and to avoid unnecessary migrating during bringup. | ||
280 | * Use the last cpu just in case the whole chip has been | ||
281 | * isolated from the scheduler, to keep init away from likely | ||
282 | * more useful user code. This also ensures that work scheduled | ||
283 | * via schedule_delayed_work() in the init routines will land | ||
284 | * on this cpu. | ||
285 | */ | ||
286 | for (cpu = cpumask_first(&init_affinity); | ||
287 | (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids; | ||
288 | cpu = next) | ||
289 | ; | ||
290 | rc = sched_setaffinity(current->pid, cpumask_of(cpu)); | ||
291 | if (rc != 0) | ||
292 | printk("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc); | ||
293 | } | ||
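The loop above walks the affinity mask to its last member; a plain-C analogue of that idiom using a 64-bit bitmask instead of a struct cpumask (illustrative only):

#include <stdint.h>

/* Return the highest set bit position, or -1 if the mask is empty. */
static int last_cpu_in_mask(uint64_t mask, int nr_cpu_ids)
{
	int cpu = -1, next;
	for (next = 0; next < nr_cpu_ids; next++)
		if (mask & (1ULL << next))
			cpu = next;
	return cpu;
}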
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c new file mode 100644 index 000000000000..382170b4b40a --- /dev/null +++ b/arch/tile/kernel/stack.c | |||
@@ -0,0 +1,485 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/kprobes.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/pfn.h> | ||
20 | #include <linux/kallsyms.h> | ||
21 | #include <linux/stacktrace.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/mmzone.h> | ||
24 | #include <asm/backtrace.h> | ||
25 | #include <asm/page.h> | ||
26 | #include <asm/tlbflush.h> | ||
27 | #include <asm/ucontext.h> | ||
28 | #include <asm/sigframe.h> | ||
29 | #include <asm/stack.h> | ||
30 | #include <arch/abi.h> | ||
31 | #include <arch/interrupts.h> | ||
32 | |||
33 | |||
34 | /* Is address on the specified kernel stack? */ | ||
35 | static int in_kernel_stack(struct KBacktraceIterator *kbt, VirtualAddress sp) | ||
36 | { | ||
37 | ulong kstack_base = (ulong) kbt->task->stack; | ||
38 | if (kstack_base == 0) /* corrupt task pointer; just follow stack... */ | ||
39 | return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory; | ||
40 | return sp >= kstack_base && sp < kstack_base + THREAD_SIZE; | ||
41 | } | ||
42 | |||
43 | /* Is address in the specified kernel code? */ | ||
44 | static int in_kernel_text(VirtualAddress address) | ||
45 | { | ||
46 | return (address >= MEM_SV_INTRPT && | ||
47 | address < MEM_SV_INTRPT + HPAGE_SIZE); | ||
48 | } | ||
49 | |||
50 | /* Is address valid for reading? */ | ||
51 | static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address) | ||
52 | { | ||
53 | HV_PTE *l1_pgtable = kbt->pgtable; | ||
54 | HV_PTE *l2_pgtable; | ||
55 | unsigned long pfn; | ||
56 | HV_PTE pte; | ||
57 | struct page *page; | ||
58 | |||
59 | pte = l1_pgtable[HV_L1_INDEX(address)]; | ||
60 | if (!hv_pte_get_present(pte)) | ||
61 | return 0; | ||
62 | pfn = hv_pte_get_pfn(pte); | ||
63 | if (pte_huge(pte)) { | ||
64 | if (!pfn_valid(pfn)) { | ||
65 | printk(KERN_ERR "huge page has bad pfn %#lx\n", pfn); | ||
66 | return 0; | ||
67 | } | ||
68 | return hv_pte_get_present(pte) && hv_pte_get_readable(pte); | ||
69 | } | ||
70 | |||
71 | page = pfn_to_page(pfn); | ||
72 | if (PageHighMem(page)) { | ||
73 | printk(KERN_ERR "L2 page table not in LOWMEM (%#llx)\n", | ||
74 | HV_PFN_TO_CPA(pfn)); | ||
75 | return 0; | ||
76 | } | ||
77 | l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn); | ||
78 | pte = l2_pgtable[HV_L2_INDEX(address)]; | ||
79 | return hv_pte_get_present(pte) && hv_pte_get_readable(pte); | ||
80 | } | ||
81 | |||
82 | /* Callback for backtracer; basically a glorified memcpy */ | ||
83 | static bool read_memory_func(void *result, VirtualAddress address, | ||
84 | unsigned int size, void *vkbt) | ||
85 | { | ||
86 | int retval; | ||
87 | struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt; | ||
88 | if (in_kernel_text(address)) { | ||
89 | /* OK to read kernel code. */ | ||
90 | } else if (address >= PAGE_OFFSET) { | ||
91 | /* We only tolerate kernel-space reads of this task's stack */ | ||
92 | if (!in_kernel_stack(kbt, address)) | ||
93 | return 0; | ||
94 | } else if (kbt->pgtable == NULL) { | ||
95 | return 0; /* can't read user space in other tasks */ | ||
96 | } else if (!valid_address(kbt, address)) { | ||
97 | return 0; /* invalid user-space address */ | ||
98 | } | ||
99 | pagefault_disable(); | ||
100 | retval = __copy_from_user_inatomic(result, (const void *)address, | ||
101 | size); | ||
102 | pagefault_enable(); | ||
103 | return (retval == 0); | ||
104 | } | ||
105 | |||
106 | /* Return a pt_regs pointer for a valid fault handler frame */ | ||
107 | static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt) | ||
108 | { | ||
109 | #ifndef __tilegx__ | ||
110 | const char *fault = NULL; /* happy compiler */ | ||
111 | char fault_buf[64]; | ||
112 | VirtualAddress sp = kbt->it.sp; | ||
113 | struct pt_regs *p; | ||
114 | |||
115 | if (!in_kernel_stack(kbt, sp)) | ||
116 | return NULL; | ||
117 | if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1)) | ||
118 | return NULL; | ||
119 | p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE); | ||
120 | if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN) | ||
121 | fault = "syscall"; | ||
122 | else { | ||
123 | if (kbt->verbose) { /* else we aren't going to use it */ | ||
124 | snprintf(fault_buf, sizeof(fault_buf), | ||
125 | "interrupt %ld", p->faultnum); | ||
126 | fault = fault_buf; | ||
127 | } | ||
128 | } | ||
129 | if (EX1_PL(p->ex1) == KERNEL_PL && | ||
130 | in_kernel_text(p->pc) && | ||
131 | in_kernel_stack(kbt, p->sp) && | ||
132 | p->sp >= sp) { | ||
133 | if (kbt->verbose) | ||
134 | printk(KERN_ERR " <%s while in kernel mode>\n", fault); | ||
135 | } else if (EX1_PL(p->ex1) == USER_PL && | ||
136 | p->pc < PAGE_OFFSET && | ||
137 | p->sp < PAGE_OFFSET) { | ||
138 | if (kbt->verbose) | ||
139 | printk(KERN_ERR " <%s while in user mode>\n", fault); | ||
140 | } else if (kbt->verbose) { | ||
141 | printk(KERN_ERR " (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n", | ||
142 | p->pc, p->sp, p->ex1); | ||
143 | p = NULL; | ||
144 | } | ||
145 | if (!kbt->profile || (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0) | ||
146 | return p; | ||
147 | #endif | ||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | /* Is the pc pointing to a sigreturn trampoline? */ | ||
152 | static int is_sigreturn(VirtualAddress pc) | ||
153 | { | ||
154 | return (pc == VDSO_BASE); | ||
155 | } | ||
156 | |||
157 | /* Return a pt_regs pointer for a valid signal handler frame */ | ||
158 | static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt) | ||
159 | { | ||
160 | BacktraceIterator *b = &kbt->it; | ||
161 | |||
162 | if (b->pc == VDSO_BASE) { | ||
163 | struct rt_sigframe *frame; | ||
164 | unsigned long sigframe_top = | ||
165 | b->sp + sizeof(struct rt_sigframe) - 1; | ||
166 | if (!valid_address(kbt, b->sp) || | ||
167 | !valid_address(kbt, sigframe_top)) { | ||
168 | if (kbt->verbose) | ||
169 | printk(" (odd signal: sp %#lx?)\n", | ||
170 | (unsigned long)(b->sp)); | ||
171 | return NULL; | ||
172 | } | ||
173 | frame = (struct rt_sigframe *)b->sp; | ||
174 | if (kbt->verbose) { | ||
175 | printk(KERN_ERR " <received signal %d>\n", | ||
176 | frame->info.si_signo); | ||
177 | } | ||
178 | return &frame->uc.uc_mcontext.regs; | ||
179 | } | ||
180 | return NULL; | ||
181 | } | ||
182 | |||
183 | int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt) | ||
184 | { | ||
185 | return is_sigreturn(kbt->it.pc); | ||
186 | } | ||
187 | |||
188 | static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt) | ||
189 | { | ||
190 | struct pt_regs *p; | ||
191 | |||
192 | p = valid_fault_handler(kbt); | ||
193 | if (p == NULL) | ||
194 | p = valid_sigframe(kbt); | ||
195 | if (p == NULL) | ||
196 | return 0; | ||
197 | backtrace_init(&kbt->it, read_memory_func, kbt, | ||
198 | p->pc, p->lr, p->sp, p->regs[52]); | ||
199 | kbt->new_context = 1; | ||
200 | return 1; | ||
201 | } | ||
202 | |||
203 | /* Find a frame that isn't a sigreturn, if there is one. */ | ||
204 | static int KBacktraceIterator_next_item_inclusive( | ||
205 | struct KBacktraceIterator *kbt) | ||
206 | { | ||
207 | for (;;) { | ||
208 | do { | ||
209 | if (!KBacktraceIterator_is_sigreturn(kbt)) | ||
210 | return 1; | ||
211 | } while (backtrace_next(&kbt->it)); | ||
212 | |||
213 | if (!KBacktraceIterator_restart(kbt)) | ||
214 | return 0; | ||
215 | } | ||
216 | } | ||
217 | |||
218 | /* | ||
219 | * If the current sp is on a page different than what we recorded | ||
220 | * as the top-of-kernel-stack last time we context switched, we have | ||
221 | * probably blown the stack, and nothing is going to work out well. | ||
222 | * If we can at least get out a warning, that may help debugging, | ||
223 | * though we probably won't be able to backtrace into the code that | ||
224 | * actually did the recursive damage. | ||
225 | */ | ||
226 | static void validate_stack(struct pt_regs *regs) | ||
227 | { | ||
228 | int cpu = smp_processor_id(); | ||
229 | unsigned long ksp0 = get_current_ksp0(); | ||
230 | unsigned long ksp0_base = ksp0 - THREAD_SIZE; | ||
231 | unsigned long sp = stack_pointer; | ||
232 | |||
233 | if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) { | ||
234 | printk("WARNING: cpu %d: kernel stack page %#lx underrun!\n" | ||
235 | " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", | ||
236 | cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); | ||
237 | } | ||
238 | |||
239 | else if (sp < ksp0_base + sizeof(struct thread_info)) { | ||
240 | printk("WARNING: cpu %d: kernel stack page %#lx overrun!\n" | ||
241 | " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", | ||
242 | cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); | ||
243 | } | ||
244 | } | ||
245 | |||
246 | void KBacktraceIterator_init(struct KBacktraceIterator *kbt, | ||
247 | struct task_struct *t, struct pt_regs *regs) | ||
248 | { | ||
249 | VirtualAddress pc, lr, sp, r52; | ||
250 | int is_current; | ||
251 | |||
252 | /* | ||
253 | * Set up callback information. We grab the kernel stack base | ||
254 | * so we will allow reads of that address range, and if we're | ||
255 | * asking about the current process we grab the page table | ||
256 | * so we can check user accesses before trying to read them. | ||
257 | * We flush the TLB to avoid any weird skew issues. | ||
258 | */ | ||
259 | is_current = (t == NULL); | ||
260 | kbt->is_current = is_current; | ||
261 | if (is_current) | ||
262 | t = validate_current(); | ||
263 | kbt->task = t; | ||
264 | kbt->pgtable = NULL; | ||
265 | kbt->verbose = 0; /* override in caller if desired */ | ||
266 | kbt->profile = 0; /* override in caller if desired */ | ||
267 | kbt->end = 0; | ||
268 | kbt->new_context = 0; | ||
269 | if (is_current) { | ||
270 | HV_PhysAddr pgdir_pa = hv_inquire_context().page_table; | ||
271 | if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) { | ||
272 | /* | ||
273 | * Not just an optimization: this also allows | ||
274 | * this to work at all before va/pa mappings | ||
275 | * are set up. | ||
276 | */ | ||
277 | kbt->pgtable = swapper_pg_dir; | ||
278 | } else { | ||
279 | struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa)); | ||
280 | if (!PageHighMem(page)) | ||
281 | kbt->pgtable = __va(pgdir_pa); | ||
282 | else | ||
283 | printk(KERN_ERR "page table not in LOWMEM" | ||
284 | " (%#llx)\n", pgdir_pa); | ||
285 | } | ||
286 | local_flush_tlb_all(); | ||
287 | validate_stack(regs); | ||
288 | } | ||
289 | |||
290 | if (regs == NULL) { | ||
291 | extern const void *get_switch_to_pc(void); | ||
292 | if (is_current || t->state == TASK_RUNNING) { | ||
293 | /* Can't do this; we need registers */ | ||
294 | kbt->end = 1; | ||
295 | return; | ||
296 | } | ||
297 | pc = (ulong) get_switch_to_pc(); | ||
298 | lr = t->thread.pc; | ||
299 | sp = t->thread.ksp; | ||
300 | r52 = 0; | ||
301 | } else { | ||
302 | pc = regs->pc; | ||
303 | lr = regs->lr; | ||
304 | sp = regs->sp; | ||
305 | r52 = regs->regs[52]; | ||
306 | } | ||
307 | |||
308 | backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52); | ||
309 | kbt->end = !KBacktraceIterator_next_item_inclusive(kbt); | ||
310 | } | ||
311 | EXPORT_SYMBOL(KBacktraceIterator_init); | ||
312 | |||
313 | int KBacktraceIterator_end(struct KBacktraceIterator *kbt) | ||
314 | { | ||
315 | return kbt->end; | ||
316 | } | ||
317 | EXPORT_SYMBOL(KBacktraceIterator_end); | ||
318 | |||
319 | void KBacktraceIterator_next(struct KBacktraceIterator *kbt) | ||
320 | { | ||
321 | kbt->new_context = 0; | ||
322 | if (!backtrace_next(&kbt->it) && | ||
323 | !KBacktraceIterator_restart(kbt)) { | ||
324 | kbt->end = 1; | ||
325 | return; | ||
326 | } | ||
327 | |||
328 | kbt->end = !KBacktraceIterator_next_item_inclusive(kbt); | ||
329 | } | ||
330 | EXPORT_SYMBOL(KBacktraceIterator_next); | ||
331 | |||
332 | /* | ||
333 | * This method wraps the backtracer's more generic support. | ||
334 | * It is only invoked from the architecture-specific code; show_stack() | ||
335 | * and dump_stack() (in entry.S) are architecture-independent entry points. | ||
336 | */ | ||
337 | void tile_show_stack(struct KBacktraceIterator *kbt, int headers) | ||
338 | { | ||
339 | int i; | ||
340 | |||
341 | if (headers) { | ||
342 | /* | ||
343 | * Add a blank line, since if we are called from panic(), | ||
344 | * bust_spinlocks() will have spit out a space in front of | ||
345 | * us and it would mess up our KERN_ERR. | ||
346 | */ | ||
347 | printk("\n"); | ||
348 | printk(KERN_ERR "Starting stack dump of tid %d, pid %d (%s)" | ||
349 | " on cpu %d at cycle %lld\n", | ||
350 | kbt->task->pid, kbt->task->tgid, kbt->task->comm, | ||
351 | smp_processor_id(), get_cycles()); | ||
352 | } | ||
353 | #ifdef __tilegx__ | ||
354 | if (kbt->is_current) { | ||
355 | __insn_mtspr(SPR_SIM_CONTROL, | ||
356 | SIM_DUMP_SPR_ARG(SIM_DUMP_BACKTRACE)); | ||
357 | } | ||
358 | #endif | ||
359 | kbt->verbose = 1; | ||
360 | i = 0; | ||
361 | for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) { | ||
362 | char *modname; | ||
363 | const char *name; | ||
364 | unsigned long address = kbt->it.pc; | ||
365 | unsigned long offset, size; | ||
366 | char namebuf[KSYM_NAME_LEN+100]; | ||
367 | |||
368 | if (address >= PAGE_OFFSET) | ||
369 | name = kallsyms_lookup(address, &size, &offset, | ||
370 | &modname, namebuf); | ||
371 | else | ||
372 | name = NULL; | ||
373 | |||
374 | if (!name) | ||
375 | namebuf[0] = '\0'; | ||
376 | else { | ||
377 | size_t namelen = strlen(namebuf); | ||
378 | size_t remaining = (sizeof(namebuf) - 1) - namelen; | ||
379 | char *p = namebuf + namelen; | ||
380 | int rc = snprintf(p, remaining, "+%#lx/%#lx ", | ||
381 | offset, size); | ||
382 | if (modname && rc < remaining) | ||
383 | snprintf(p + rc, remaining - rc, | ||
384 | "[%s] ", modname); | ||
385 | namebuf[sizeof(namebuf)-1] = '\0'; | ||
386 | } | ||
387 | |||
388 | printk(KERN_ERR " frame %d: 0x%lx %s(sp 0x%lx)\n", | ||
389 | i++, address, namebuf, (unsigned long)(kbt->it.sp)); | ||
390 | |||
391 | if (i >= 100) { | ||
392 | printk(KERN_ERR "Stack dump truncated" | ||
393 | " (%d frames)\n", i); | ||
394 | break; | ||
395 | } | ||
396 | } | ||
397 | if (headers) | ||
398 | printk(KERN_ERR "Stack dump complete\n"); | ||
399 | } | ||
400 | EXPORT_SYMBOL(tile_show_stack); | ||
401 | |||
402 | |||
403 | /* This is called from show_regs() and _dump_stack() */ | ||
404 | void dump_stack_regs(struct pt_regs *regs) | ||
405 | { | ||
406 | struct KBacktraceIterator kbt; | ||
407 | KBacktraceIterator_init(&kbt, NULL, regs); | ||
408 | tile_show_stack(&kbt, 1); | ||
409 | } | ||
410 | EXPORT_SYMBOL(dump_stack_regs); | ||
411 | |||
412 | static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs, | ||
413 | ulong pc, ulong lr, ulong sp, ulong r52) | ||
414 | { | ||
415 | memset(regs, 0, sizeof(struct pt_regs)); | ||
416 | regs->pc = pc; | ||
417 | regs->lr = lr; | ||
418 | regs->sp = sp; | ||
419 | regs->regs[52] = r52; | ||
420 | return regs; | ||
421 | } | ||
422 | |||
423 | /* This is called from dump_stack() and just converts to pt_regs */ | ||
424 | void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52) | ||
425 | { | ||
426 | struct pt_regs regs; | ||
427 | dump_stack_regs(regs_to_pt_regs(®s, pc, lr, sp, r52)); | ||
428 | } | ||
429 | |||
430 | /* This is called from KBacktraceIterator_init_current() */ | ||
431 | void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc, | ||
432 | ulong lr, ulong sp, ulong r52) | ||
433 | { | ||
434 | struct pt_regs regs; | ||
435 | KBacktraceIterator_init(kbt, NULL, | ||
436 | regs_to_pt_regs(®s, pc, lr, sp, r52)); | ||
437 | } | ||
438 | |||
439 | /* This is called only from kernel/sched.c, with esp == NULL */ | ||
440 | void show_stack(struct task_struct *task, unsigned long *esp) | ||
441 | { | ||
442 | struct KBacktraceIterator kbt; | ||
443 | if (task == NULL || task == current) | ||
444 | KBacktraceIterator_init_current(&kbt); | ||
445 | else | ||
446 | KBacktraceIterator_init(&kbt, task, NULL); | ||
447 | tile_show_stack(&kbt, 0); | ||
448 | } | ||
449 | |||
450 | #ifdef CONFIG_STACKTRACE | ||
451 | |||
452 | /* Support generic Linux stack API too */ | ||
453 | |||
454 | void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace) | ||
455 | { | ||
456 | struct KBacktraceIterator kbt; | ||
457 | int skip = trace->skip; | ||
458 | int i = 0; | ||
459 | |||
460 | if (task == NULL || task == current) | ||
461 | KBacktraceIterator_init_current(&kbt); | ||
462 | else | ||
463 | KBacktraceIterator_init(&kbt, task, NULL); | ||
464 | for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) { | ||
465 | if (skip) { | ||
466 | --skip; | ||
467 | continue; | ||
468 | } | ||
469 | if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET) | ||
470 | break; | ||
471 | trace->entries[i++] = kbt.it.pc; | ||
472 | } | ||
473 | trace->nr_entries = i; | ||
474 | } | ||
475 | EXPORT_SYMBOL(save_stack_trace_tsk); | ||
476 | |||
477 | void save_stack_trace(struct stack_trace *trace) | ||
478 | { | ||
479 | save_stack_trace_tsk(NULL, trace); | ||
480 | } | ||
481 | |||
482 | #endif | ||
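For context, a minimal sketch of how a caller elsewhere in the kernel could use the generic API implemented above; the helper function itself is hypothetical, while struct stack_trace and print_stack_trace() are the standard <linux/stacktrace.h> interfaces of this era:

	#include <linux/kernel.h>
	#include <linux/stacktrace.h>

	/* Hypothetical caller: capture and print the current stack. */
	static void example_dump_current(void)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.entries	= entries,
			.max_entries	= ARRAY_SIZE(entries),
			.skip		= 1,	/* drop this helper's own frame */
		};

		save_stack_trace(&trace);	/* fills entries[] via the code above */
		print_stack_trace(&trace, 0);	/* one line per saved pc */
	}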
483 | |||
484 | /* In entry.S */ | ||
485 | EXPORT_SYMBOL(KBacktraceIterator_init_current); | ||
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c new file mode 100644 index 000000000000..a3d982b212b4 --- /dev/null +++ b/arch/tile/kernel/sys.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This file contains various random system calls that | ||
15 | * have a non-standard calling sequence on the Linux/TILE | ||
16 | * platform. | ||
17 | */ | ||
18 | |||
19 | #include <linux/errno.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/smp.h> | ||
23 | #include <linux/smp_lock.h> | ||
24 | #include <linux/syscalls.h> | ||
25 | #include <linux/mman.h> | ||
26 | #include <linux/file.h> | ||
27 | #include <linux/mempolicy.h> | ||
28 | #include <linux/binfmts.h> | ||
29 | #include <linux/fs.h> | ||
30 | #include <linux/syscalls.h> | ||
31 | #include <linux/uaccess.h> | ||
32 | #include <linux/signal.h> | ||
33 | #include <asm/syscalls.h> | ||
34 | |||
35 | #include <asm/pgtable.h> | ||
36 | #include <asm/homecache.h> | ||
37 | #include <arch/chip.h> | ||
38 | |||
39 | SYSCALL_DEFINE0(flush_cache) | ||
40 | { | ||
41 | homecache_evict(cpumask_of(smp_processor_id())); | ||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * Syscalls that pass 64-bit values on 32-bit systems normally | ||
47 | * pass them as a (low, high) pair of words packed into immediately | ||
48 | * adjacent registers. If the low word naturally falls in an even | ||
49 | * register, our ABI makes it work correctly; if not, we adjust it here. | ||
50 | * Handling it here means we don't have to fix uclibc AND glibc AND | ||
51 | * any other standard libcs we want to support. | ||
52 | */ | ||
53 | |||
54 | #if !defined(__tilegx__) || defined(CONFIG_COMPAT) | ||
55 | |||
56 | ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count) | ||
57 | { | ||
58 | return sys_readahead(fd, ((loff_t)offset_hi << 32) | offset_lo, count); | ||
59 | } | ||
60 | |||
61 | long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi, | ||
62 | u32 len, int advice) | ||
63 | { | ||
64 | return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo, | ||
65 | len, advice); | ||
66 | } | ||
67 | |||
68 | int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, | ||
69 | u32 len_lo, u32 len_hi, int advice) | ||
70 | { | ||
71 | return sys_fadvise64_64(fd, ((loff_t)offset_hi << 32) | offset_lo, | ||
72 | ((loff_t)len_hi << 32) | len_lo, advice); | ||
73 | } | ||
74 | |||
75 | #endif /* 32-bit syscall wrappers */ | ||
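As a hedged illustration of the register-pair convention described above, this is roughly what a userspace wrapper would do before sys32_readahead() reassembles the value; the wrapper is hypothetical, and any ABI padding needed to land the low word in an even register is glossed over:

	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	/* Hypothetical libc-style wrapper: split the 64-bit offset into the
	 * (low, high) word pair that sys32_readahead() above puts back together. */
	static ssize_t my_readahead(int fd, int64_t offset, size_t count)
	{
		uint32_t lo = (uint32_t)offset;
		uint32_t hi = (uint32_t)((uint64_t)offset >> 32);

		return syscall(SYS_readahead, fd, lo, hi, count);
	}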
76 | |||
77 | /* | ||
78 | * This API uses a 4KB-page-count offset into the file. | ||
79 | * It is likely not the right API to use on a 64-bit platform. | ||
80 | */ | ||
81 | SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, | ||
82 | unsigned long, prot, unsigned long, flags, | ||
83 | unsigned long, fd, unsigned long, off_4k) | ||
84 | { | ||
85 | #define PAGE_ADJUST (PAGE_SHIFT - 12) | ||
86 | if (off_4k & ((1 << PAGE_ADJUST) - 1)) | ||
87 | return -EINVAL; | ||
88 | return sys_mmap_pgoff(addr, len, prot, flags, fd, | ||
89 | off_4k >> PAGE_ADJUST); | ||
90 | } | ||
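A small worked example of the conversion above, assuming a 64KB PAGE_SIZE so that PAGE_SHIFT == 16 (the page size and offsets are illustrative only):

	/*
	 * PAGE_ADJUST = PAGE_SHIFT - 12 = 4, so off_4k must be a multiple of 16,
	 * i.e. the byte offset must be page aligned:
	 *
	 *   off_4k = 32  ->  byte offset 32 * 4KB = 128KB
	 *                    pgoff = 32 >> 4 = 2, and 2 * 64KB = 128KB (same offset)
	 *   off_4k = 33  ->  33 & 0xf != 0, so the call returns -EINVAL
	 */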
91 | |||
92 | /* | ||
93 | * This API uses a byte offset into the file. | ||
94 | * It is likely not the right API to use on a 32-bit platform. | ||
95 | */ | ||
96 | SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, | ||
97 | unsigned long, prot, unsigned long, flags, | ||
98 | unsigned long, fd, unsigned long, offset) | ||
99 | { | ||
100 | if (offset & ((1 << PAGE_SHIFT) - 1)) | ||
101 | return -EINVAL; | ||
102 | return sys_mmap_pgoff(addr, len, prot, flags, fd, | ||
103 | offset >> PAGE_SHIFT); | ||
104 | } | ||
105 | |||
106 | |||
107 | /* Provide the actual syscall number to call mapping. */ | ||
108 | #undef __SYSCALL | ||
109 | #define __SYSCALL(nr, call) [nr] = (call), | ||
110 | |||
111 | #ifndef __tilegx__ | ||
112 | /* See the comment above the 32-bit syscall wrappers earlier in this file. */ | ||
113 | #define sys_fadvise64 sys32_fadvise64 | ||
114 | #define sys_fadvise64_64 sys32_fadvise64_64 | ||
115 | #define sys_readahead sys32_readahead | ||
116 | #define sys_sync_file_range sys_sync_file_range2 | ||
117 | #endif | ||
118 | |||
119 | void *sys_call_table[__NR_syscalls] = { | ||
120 | [0 ... __NR_syscalls-1] = sys_ni_syscall, | ||
121 | #include <asm/unistd.h> | ||
122 | }; | ||
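To make the table construction above concrete, a hedged sketch of the expansion it relies on; the syscall name and number here are placeholders, not the actual contents of <asm/unistd.h>:

	/*
	 * Suppose <asm/unistd.h> contained, schematically:
	 *
	 *   #define __NR_example  1000
	 *   __SYSCALL(__NR_example, sys_example)
	 *
	 * With __SYSCALL redefined as above, including the header expands to
	 * the designated initializer
	 *
	 *   [1000] = (sys_example),
	 *
	 * which overrides the [0 ... __NR_syscalls-1] = sys_ni_syscall default
	 * for that one slot; every syscall number with no entry keeps pointing
	 * at sys_ni_syscall.
	 */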
diff --git a/arch/tile/kernel/tile-desc_32.c b/arch/tile/kernel/tile-desc_32.c new file mode 100644 index 000000000000..3b78369f86b0 --- /dev/null +++ b/arch/tile/kernel/tile-desc_32.c | |||
@@ -0,0 +1,13826 @@ | |||
1 | /* Define to include "bfd.h" and get actual BFD relocations below. */ | ||
2 | /* #define WANT_BFD_RELOCS */ | ||
3 | |||
4 | #ifdef WANT_BFD_RELOCS | ||
5 | #include "bfd.h" | ||
6 | #define MAYBE_BFD_RELOC(X) (X) | ||
7 | #else | ||
8 | #define MAYBE_BFD_RELOC(X) -1 | ||
9 | #endif | ||
10 | |||
11 | /* Special registers. */ | ||
12 | #define TREG_LR 55 | ||
13 | #define TREG_SN 56 | ||
14 | #define TREG_ZERO 63 | ||
15 | |||
16 | /* FIXME: Rename this. */ | ||
17 | #include <asm/opcode-tile.h> | ||
18 | |||
19 | |||
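Before the generated table itself, a hedged sketch of how a decoder would typically consume the per-pipeline mask/value pairs in each entry; the helper below is illustrative, not part of this file, and the field names are inferred from the initializer comments that follow:

	/* An instruction word matches an opcode in a given pipeline when the
	 * fixed bits selected by the mask equal the recorded value; a value of
	 * -1ULL marks a pipeline the opcode cannot issue in. */
	static int insn_matches(unsigned long long bits,
				const struct tile_opcode *op, int pipe)
	{
		if (op->fixed_bit_values[pipe] == -1ULL)
			return 0;
		return (bits & op->fixed_bit_masks[pipe]) ==
			op->fixed_bit_values[pipe];
	}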
20 | const struct tile_opcode tile_opcodes[394] = | ||
21 | { | ||
22 | { "bpt", TILE_OPC_BPT, 0x2 /* pipes */, 0 /* num_operands */, | ||
23 | TREG_ZERO, /* implicitly_written_register */ | ||
24 | 0, /* can_bundle */ | ||
25 | { | ||
26 | /* operands */ | ||
27 | { 0, }, | ||
28 | { }, | ||
29 | { 0, }, | ||
30 | { 0, }, | ||
31 | { 0, } | ||
32 | }, | ||
33 | { | ||
34 | /* fixed_bit_masks */ | ||
35 | 0ULL, | ||
36 | 0xfbffffff80000000ULL, | ||
37 | 0ULL, | ||
38 | 0ULL, | ||
39 | 0ULL | ||
40 | }, | ||
41 | { | ||
42 | /* fixed_bit_values */ | ||
43 | -1ULL, | ||
44 | 0x400b3cae00000000ULL, | ||
45 | -1ULL, | ||
46 | -1ULL, | ||
47 | -1ULL | ||
48 | } | ||
49 | }, | ||
50 | { "info", TILE_OPC_INFO, 0xf /* pipes */, 1 /* num_operands */, | ||
51 | TREG_ZERO, /* implicitly_written_register */ | ||
52 | 1, /* can_bundle */ | ||
53 | { | ||
54 | /* operands */ | ||
55 | { 0 }, | ||
56 | { 1 }, | ||
57 | { 2 }, | ||
58 | { 3 }, | ||
59 | { 0, } | ||
60 | }, | ||
61 | { | ||
62 | /* fixed_bit_masks */ | ||
63 | 0x800000007ff00fffULL, | ||
64 | 0xfff807ff80000000ULL, | ||
65 | 0x8000000078000fffULL, | ||
66 | 0xf80007ff80000000ULL, | ||
67 | 0ULL | ||
68 | }, | ||
69 | { | ||
70 | /* fixed_bit_values */ | ||
71 | 0x0000000050100fffULL, | ||
72 | 0x302007ff80000000ULL, | ||
73 | 0x8000000050000fffULL, | ||
74 | 0xc00007ff80000000ULL, | ||
75 | -1ULL | ||
76 | } | ||
77 | }, | ||
78 | { "infol", TILE_OPC_INFOL, 0x3 /* pipes */, 1 /* num_operands */, | ||
79 | TREG_ZERO, /* implicitly_written_register */ | ||
80 | 1, /* can_bundle */ | ||
81 | { | ||
82 | /* operands */ | ||
83 | { 4 }, | ||
84 | { 5 }, | ||
85 | { 0, }, | ||
86 | { 0, }, | ||
87 | { 0, } | ||
88 | }, | ||
89 | { | ||
90 | /* fixed_bit_masks */ | ||
91 | 0x8000000070000fffULL, | ||
92 | 0xf80007ff80000000ULL, | ||
93 | 0ULL, | ||
94 | 0ULL, | ||
95 | 0ULL | ||
96 | }, | ||
97 | { | ||
98 | /* fixed_bit_values */ | ||
99 | 0x0000000030000fffULL, | ||
100 | 0x200007ff80000000ULL, | ||
101 | -1ULL, | ||
102 | -1ULL, | ||
103 | -1ULL | ||
104 | } | ||
105 | }, | ||
106 | { "j", TILE_OPC_J, 0x2 /* pipes */, 1 /* num_operands */, | ||
107 | TREG_ZERO, /* implicitly_written_register */ | ||
108 | 1, /* can_bundle */ | ||
109 | { | ||
110 | /* operands */ | ||
111 | { 0, }, | ||
112 | { 6 }, | ||
113 | { 0, }, | ||
114 | { 0, }, | ||
115 | { 0, } | ||
116 | }, | ||
117 | { | ||
118 | /* fixed_bit_masks */ | ||
119 | 0ULL, | ||
120 | 0xf000000000000000ULL, | ||
121 | 0ULL, | ||
122 | 0ULL, | ||
123 | 0ULL | ||
124 | }, | ||
125 | { | ||
126 | /* fixed_bit_values */ | ||
127 | -1ULL, | ||
128 | 0x5000000000000000ULL, | ||
129 | -1ULL, | ||
130 | -1ULL, | ||
131 | -1ULL | ||
132 | } | ||
133 | }, | ||
134 | { "jal", TILE_OPC_JAL, 0x2 /* pipes */, 1 /* num_operands */, | ||
135 | TREG_LR, /* implicitly_written_register */ | ||
136 | 1, /* can_bundle */ | ||
137 | { | ||
138 | /* operands */ | ||
139 | { 0, }, | ||
140 | { 6 }, | ||
141 | { 0, }, | ||
142 | { 0, }, | ||
143 | { 0, } | ||
144 | }, | ||
145 | { | ||
146 | /* fixed_bit_masks */ | ||
147 | 0ULL, | ||
148 | 0xf000000000000000ULL, | ||
149 | 0ULL, | ||
150 | 0ULL, | ||
151 | 0ULL | ||
152 | }, | ||
153 | { | ||
154 | /* fixed_bit_values */ | ||
155 | -1ULL, | ||
156 | 0x6000000000000000ULL, | ||
157 | -1ULL, | ||
158 | -1ULL, | ||
159 | -1ULL | ||
160 | } | ||
161 | }, | ||
162 | { "move", TILE_OPC_MOVE, 0xf /* pipes */, 2 /* num_operands */, | ||
163 | TREG_ZERO, /* implicitly_written_register */ | ||
164 | 1, /* can_bundle */ | ||
165 | { | ||
166 | /* operands */ | ||
167 | { 7, 8 }, | ||
168 | { 9, 10 }, | ||
169 | { 11, 12 }, | ||
170 | { 13, 14 }, | ||
171 | { 0, } | ||
172 | }, | ||
173 | { | ||
174 | /* fixed_bit_masks */ | ||
175 | 0x800000007ffff000ULL, | ||
176 | 0xfffff80000000000ULL, | ||
177 | 0x80000000780ff000ULL, | ||
178 | 0xf807f80000000000ULL, | ||
179 | 0ULL | ||
180 | }, | ||
181 | { | ||
182 | /* fixed_bit_values */ | ||
183 | 0x0000000000cff000ULL, | ||
184 | 0x0833f80000000000ULL, | ||
185 | 0x80000000180bf000ULL, | ||
186 | 0x9805f80000000000ULL, | ||
187 | -1ULL | ||
188 | } | ||
189 | }, | ||
190 | { "move.sn", TILE_OPC_MOVE_SN, 0x3 /* pipes */, 2 /* num_operands */, | ||
191 | TREG_SN, /* implicitly_written_register */ | ||
192 | 1, /* can_bundle */ | ||
193 | { | ||
194 | /* operands */ | ||
195 | { 7, 8 }, | ||
196 | { 9, 10 }, | ||
197 | { 0, }, | ||
198 | { 0, }, | ||
199 | { 0, } | ||
200 | }, | ||
201 | { | ||
202 | /* fixed_bit_masks */ | ||
203 | 0x800000007ffff000ULL, | ||
204 | 0xfffff80000000000ULL, | ||
205 | 0ULL, | ||
206 | 0ULL, | ||
207 | 0ULL | ||
208 | }, | ||
209 | { | ||
210 | /* fixed_bit_values */ | ||
211 | 0x0000000008cff000ULL, | ||
212 | 0x0c33f80000000000ULL, | ||
213 | -1ULL, | ||
214 | -1ULL, | ||
215 | -1ULL | ||
216 | } | ||
217 | }, | ||
218 | { "movei", TILE_OPC_MOVEI, 0xf /* pipes */, 2 /* num_operands */, | ||
219 | TREG_ZERO, /* implicitly_written_register */ | ||
220 | 1, /* can_bundle */ | ||
221 | { | ||
222 | /* operands */ | ||
223 | { 7, 0 }, | ||
224 | { 9, 1 }, | ||
225 | { 11, 2 }, | ||
226 | { 13, 3 }, | ||
227 | { 0, } | ||
228 | }, | ||
229 | { | ||
230 | /* fixed_bit_masks */ | ||
231 | 0x800000007ff00fc0ULL, | ||
232 | 0xfff807e000000000ULL, | ||
233 | 0x8000000078000fc0ULL, | ||
234 | 0xf80007e000000000ULL, | ||
235 | 0ULL | ||
236 | }, | ||
237 | { | ||
238 | /* fixed_bit_values */ | ||
239 | 0x0000000040800fc0ULL, | ||
240 | 0x305807e000000000ULL, | ||
241 | 0x8000000058000fc0ULL, | ||
242 | 0xc80007e000000000ULL, | ||
243 | -1ULL | ||
244 | } | ||
245 | }, | ||
246 | { "movei.sn", TILE_OPC_MOVEI_SN, 0x3 /* pipes */, 2 /* num_operands */, | ||
247 | TREG_SN, /* implicitly_written_register */ | ||
248 | 1, /* can_bundle */ | ||
249 | { | ||
250 | /* operands */ | ||
251 | { 7, 0 }, | ||
252 | { 9, 1 }, | ||
253 | { 0, }, | ||
254 | { 0, }, | ||
255 | { 0, } | ||
256 | }, | ||
257 | { | ||
258 | /* fixed_bit_masks */ | ||
259 | 0x800000007ff00fc0ULL, | ||
260 | 0xfff807e000000000ULL, | ||
261 | 0ULL, | ||
262 | 0ULL, | ||
263 | 0ULL | ||
264 | }, | ||
265 | { | ||
266 | /* fixed_bit_values */ | ||
267 | 0x0000000048800fc0ULL, | ||
268 | 0x345807e000000000ULL, | ||
269 | -1ULL, | ||
270 | -1ULL, | ||
271 | -1ULL | ||
272 | } | ||
273 | }, | ||
274 | { "moveli", TILE_OPC_MOVELI, 0x3 /* pipes */, 2 /* num_operands */, | ||
275 | TREG_ZERO, /* implicitly_written_register */ | ||
276 | 1, /* can_bundle */ | ||
277 | { | ||
278 | /* operands */ | ||
279 | { 7, 4 }, | ||
280 | { 9, 5 }, | ||
281 | { 0, }, | ||
282 | { 0, }, | ||
283 | { 0, } | ||
284 | }, | ||
285 | { | ||
286 | /* fixed_bit_masks */ | ||
287 | 0x8000000070000fc0ULL, | ||
288 | 0xf80007e000000000ULL, | ||
289 | 0ULL, | ||
290 | 0ULL, | ||
291 | 0ULL | ||
292 | }, | ||
293 | { | ||
294 | /* fixed_bit_values */ | ||
295 | 0x0000000020000fc0ULL, | ||
296 | 0x180007e000000000ULL, | ||
297 | -1ULL, | ||
298 | -1ULL, | ||
299 | -1ULL | ||
300 | } | ||
301 | }, | ||
302 | { "moveli.sn", TILE_OPC_MOVELI_SN, 0x3 /* pipes */, 2 /* num_operands */, | ||
303 | TREG_SN, /* implicitly_written_register */ | ||
304 | 1, /* can_bundle */ | ||
305 | { | ||
306 | /* operands */ | ||
307 | { 7, 4 }, | ||
308 | { 9, 5 }, | ||
309 | { 0, }, | ||
310 | { 0, }, | ||
311 | { 0, } | ||
312 | }, | ||
313 | { | ||
314 | /* fixed_bit_masks */ | ||
315 | 0x8000000070000fc0ULL, | ||
316 | 0xf80007e000000000ULL, | ||
317 | 0ULL, | ||
318 | 0ULL, | ||
319 | 0ULL | ||
320 | }, | ||
321 | { | ||
322 | /* fixed_bit_values */ | ||
323 | 0x0000000010000fc0ULL, | ||
324 | 0x100007e000000000ULL, | ||
325 | -1ULL, | ||
326 | -1ULL, | ||
327 | -1ULL | ||
328 | } | ||
329 | }, | ||
330 | { "movelis", TILE_OPC_MOVELIS, 0x3 /* pipes */, 2 /* num_operands */, | ||
331 | TREG_SN, /* implicitly_written_register */ | ||
332 | 1, /* can_bundle */ | ||
333 | { | ||
334 | /* operands */ | ||
335 | { 7, 4 }, | ||
336 | { 9, 5 }, | ||
337 | { 0, }, | ||
338 | { 0, }, | ||
339 | { 0, } | ||
340 | }, | ||
341 | { | ||
342 | /* fixed_bit_masks */ | ||
343 | 0x8000000070000fc0ULL, | ||
344 | 0xf80007e000000000ULL, | ||
345 | 0ULL, | ||
346 | 0ULL, | ||
347 | 0ULL | ||
348 | }, | ||
349 | { | ||
350 | /* fixed_bit_values */ | ||
351 | 0x0000000010000fc0ULL, | ||
352 | 0x100007e000000000ULL, | ||
353 | -1ULL, | ||
354 | -1ULL, | ||
355 | -1ULL | ||
356 | } | ||
357 | }, | ||
358 | { "prefetch", TILE_OPC_PREFETCH, 0x12 /* pipes */, 1 /* num_operands */, | ||
359 | TREG_ZERO, /* implicitly_written_register */ | ||
360 | 1, /* can_bundle */ | ||
361 | { | ||
362 | /* operands */ | ||
363 | { 0, }, | ||
364 | { 10 }, | ||
365 | { 0, }, | ||
366 | { 0, }, | ||
367 | { 15 } | ||
368 | }, | ||
369 | { | ||
370 | /* fixed_bit_masks */ | ||
371 | 0ULL, | ||
372 | 0xfffff81f80000000ULL, | ||
373 | 0ULL, | ||
374 | 0ULL, | ||
375 | 0x8700000003f00000ULL | ||
376 | }, | ||
377 | { | ||
378 | /* fixed_bit_values */ | ||
379 | -1ULL, | ||
380 | 0x400b501f80000000ULL, | ||
381 | -1ULL, | ||
382 | -1ULL, | ||
383 | 0x8000000003f00000ULL | ||
384 | } | ||
385 | }, | ||
386 | { "add", TILE_OPC_ADD, 0xf /* pipes */, 3 /* num_operands */, | ||
387 | TREG_ZERO, /* implicitly_written_register */ | ||
388 | 1, /* can_bundle */ | ||
389 | { | ||
390 | /* operands */ | ||
391 | { 7, 8, 16 }, | ||
392 | { 9, 10, 17 }, | ||
393 | { 11, 12, 18 }, | ||
394 | { 13, 14, 19 }, | ||
395 | { 0, } | ||
396 | }, | ||
397 | { | ||
398 | /* fixed_bit_masks */ | ||
399 | 0x800000007ffc0000ULL, | ||
400 | 0xfffe000000000000ULL, | ||
401 | 0x80000000780c0000ULL, | ||
402 | 0xf806000000000000ULL, | ||
403 | 0ULL | ||
404 | }, | ||
405 | { | ||
406 | /* fixed_bit_values */ | ||
407 | 0x00000000000c0000ULL, | ||
408 | 0x0806000000000000ULL, | ||
409 | 0x8000000008000000ULL, | ||
410 | 0x8800000000000000ULL, | ||
411 | -1ULL | ||
412 | } | ||
413 | }, | ||
414 | { "add.sn", TILE_OPC_ADD_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
415 | TREG_SN, /* implicitly_written_register */ | ||
416 | 1, /* can_bundle */ | ||
417 | { | ||
418 | /* operands */ | ||
419 | { 7, 8, 16 }, | ||
420 | { 9, 10, 17 }, | ||
421 | { 0, }, | ||
422 | { 0, }, | ||
423 | { 0, } | ||
424 | }, | ||
425 | { | ||
426 | /* fixed_bit_masks */ | ||
427 | 0x800000007ffc0000ULL, | ||
428 | 0xfffe000000000000ULL, | ||
429 | 0ULL, | ||
430 | 0ULL, | ||
431 | 0ULL | ||
432 | }, | ||
433 | { | ||
434 | /* fixed_bit_values */ | ||
435 | 0x00000000080c0000ULL, | ||
436 | 0x0c06000000000000ULL, | ||
437 | -1ULL, | ||
438 | -1ULL, | ||
439 | -1ULL | ||
440 | } | ||
441 | }, | ||
442 | { "addb", TILE_OPC_ADDB, 0x3 /* pipes */, 3 /* num_operands */, | ||
443 | TREG_ZERO, /* implicitly_written_register */ | ||
444 | 1, /* can_bundle */ | ||
445 | { | ||
446 | /* operands */ | ||
447 | { 7, 8, 16 }, | ||
448 | { 9, 10, 17 }, | ||
449 | { 0, }, | ||
450 | { 0, }, | ||
451 | { 0, } | ||
452 | }, | ||
453 | { | ||
454 | /* fixed_bit_masks */ | ||
455 | 0x800000007ffc0000ULL, | ||
456 | 0xfffe000000000000ULL, | ||
457 | 0ULL, | ||
458 | 0ULL, | ||
459 | 0ULL | ||
460 | }, | ||
461 | { | ||
462 | /* fixed_bit_values */ | ||
463 | 0x0000000000040000ULL, | ||
464 | 0x0802000000000000ULL, | ||
465 | -1ULL, | ||
466 | -1ULL, | ||
467 | -1ULL | ||
468 | } | ||
469 | }, | ||
470 | { "addb.sn", TILE_OPC_ADDB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
471 | TREG_SN, /* implicitly_written_register */ | ||
472 | 1, /* can_bundle */ | ||
473 | { | ||
474 | /* operands */ | ||
475 | { 7, 8, 16 }, | ||
476 | { 9, 10, 17 }, | ||
477 | { 0, }, | ||
478 | { 0, }, | ||
479 | { 0, } | ||
480 | }, | ||
481 | { | ||
482 | /* fixed_bit_masks */ | ||
483 | 0x800000007ffc0000ULL, | ||
484 | 0xfffe000000000000ULL, | ||
485 | 0ULL, | ||
486 | 0ULL, | ||
487 | 0ULL | ||
488 | }, | ||
489 | { | ||
490 | /* fixed_bit_values */ | ||
491 | 0x0000000008040000ULL, | ||
492 | 0x0c02000000000000ULL, | ||
493 | -1ULL, | ||
494 | -1ULL, | ||
495 | -1ULL | ||
496 | } | ||
497 | }, | ||
498 | { "addbs_u", TILE_OPC_ADDBS_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
499 | TREG_ZERO, /* implicitly_written_register */ | ||
500 | 1, /* can_bundle */ | ||
501 | { | ||
502 | /* operands */ | ||
503 | { 7, 8, 16 }, | ||
504 | { 9, 10, 17 }, | ||
505 | { 0, }, | ||
506 | { 0, }, | ||
507 | { 0, } | ||
508 | }, | ||
509 | { | ||
510 | /* fixed_bit_masks */ | ||
511 | 0x800000007ffc0000ULL, | ||
512 | 0xfffe000000000000ULL, | ||
513 | 0ULL, | ||
514 | 0ULL, | ||
515 | 0ULL | ||
516 | }, | ||
517 | { | ||
518 | /* fixed_bit_values */ | ||
519 | 0x0000000001880000ULL, | ||
520 | 0x0888000000000000ULL, | ||
521 | -1ULL, | ||
522 | -1ULL, | ||
523 | -1ULL | ||
524 | } | ||
525 | }, | ||
526 | { "addbs_u.sn", TILE_OPC_ADDBS_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
527 | TREG_SN, /* implicitly_written_register */ | ||
528 | 1, /* can_bundle */ | ||
529 | { | ||
530 | /* operands */ | ||
531 | { 7, 8, 16 }, | ||
532 | { 9, 10, 17 }, | ||
533 | { 0, }, | ||
534 | { 0, }, | ||
535 | { 0, } | ||
536 | }, | ||
537 | { | ||
538 | /* fixed_bit_masks */ | ||
539 | 0x800000007ffc0000ULL, | ||
540 | 0xfffe000000000000ULL, | ||
541 | 0ULL, | ||
542 | 0ULL, | ||
543 | 0ULL | ||
544 | }, | ||
545 | { | ||
546 | /* fixed_bit_values */ | ||
547 | 0x0000000009880000ULL, | ||
548 | 0x0c88000000000000ULL, | ||
549 | -1ULL, | ||
550 | -1ULL, | ||
551 | -1ULL | ||
552 | } | ||
553 | }, | ||
554 | { "addh", TILE_OPC_ADDH, 0x3 /* pipes */, 3 /* num_operands */, | ||
555 | TREG_ZERO, /* implicitly_written_register */ | ||
556 | 1, /* can_bundle */ | ||
557 | { | ||
558 | /* operands */ | ||
559 | { 7, 8, 16 }, | ||
560 | { 9, 10, 17 }, | ||
561 | { 0, }, | ||
562 | { 0, }, | ||
563 | { 0, } | ||
564 | }, | ||
565 | { | ||
566 | /* fixed_bit_masks */ | ||
567 | 0x800000007ffc0000ULL, | ||
568 | 0xfffe000000000000ULL, | ||
569 | 0ULL, | ||
570 | 0ULL, | ||
571 | 0ULL | ||
572 | }, | ||
573 | { | ||
574 | /* fixed_bit_values */ | ||
575 | 0x0000000000080000ULL, | ||
576 | 0x0804000000000000ULL, | ||
577 | -1ULL, | ||
578 | -1ULL, | ||
579 | -1ULL | ||
580 | } | ||
581 | }, | ||
582 | { "addh.sn", TILE_OPC_ADDH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
583 | TREG_SN, /* implicitly_written_register */ | ||
584 | 1, /* can_bundle */ | ||
585 | { | ||
586 | /* operands */ | ||
587 | { 7, 8, 16 }, | ||
588 | { 9, 10, 17 }, | ||
589 | { 0, }, | ||
590 | { 0, }, | ||
591 | { 0, } | ||
592 | }, | ||
593 | { | ||
594 | /* fixed_bit_masks */ | ||
595 | 0x800000007ffc0000ULL, | ||
596 | 0xfffe000000000000ULL, | ||
597 | 0ULL, | ||
598 | 0ULL, | ||
599 | 0ULL | ||
600 | }, | ||
601 | { | ||
602 | /* fixed_bit_values */ | ||
603 | 0x0000000008080000ULL, | ||
604 | 0x0c04000000000000ULL, | ||
605 | -1ULL, | ||
606 | -1ULL, | ||
607 | -1ULL | ||
608 | } | ||
609 | }, | ||
610 | { "addhs", TILE_OPC_ADDHS, 0x3 /* pipes */, 3 /* num_operands */, | ||
611 | TREG_ZERO, /* implicitly_written_register */ | ||
612 | 1, /* can_bundle */ | ||
613 | { | ||
614 | /* operands */ | ||
615 | { 7, 8, 16 }, | ||
616 | { 9, 10, 17 }, | ||
617 | { 0, }, | ||
618 | { 0, }, | ||
619 | { 0, } | ||
620 | }, | ||
621 | { | ||
622 | /* fixed_bit_masks */ | ||
623 | 0x800000007ffc0000ULL, | ||
624 | 0xfffe000000000000ULL, | ||
625 | 0ULL, | ||
626 | 0ULL, | ||
627 | 0ULL | ||
628 | }, | ||
629 | { | ||
630 | /* fixed_bit_values */ | ||
631 | 0x00000000018c0000ULL, | ||
632 | 0x088a000000000000ULL, | ||
633 | -1ULL, | ||
634 | -1ULL, | ||
635 | -1ULL | ||
636 | } | ||
637 | }, | ||
638 | { "addhs.sn", TILE_OPC_ADDHS_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
639 | TREG_SN, /* implicitly_written_register */ | ||
640 | 1, /* can_bundle */ | ||
641 | { | ||
642 | /* operands */ | ||
643 | { 7, 8, 16 }, | ||
644 | { 9, 10, 17 }, | ||
645 | { 0, }, | ||
646 | { 0, }, | ||
647 | { 0, } | ||
648 | }, | ||
649 | { | ||
650 | /* fixed_bit_masks */ | ||
651 | 0x800000007ffc0000ULL, | ||
652 | 0xfffe000000000000ULL, | ||
653 | 0ULL, | ||
654 | 0ULL, | ||
655 | 0ULL | ||
656 | }, | ||
657 | { | ||
658 | /* fixed_bit_values */ | ||
659 | 0x00000000098c0000ULL, | ||
660 | 0x0c8a000000000000ULL, | ||
661 | -1ULL, | ||
662 | -1ULL, | ||
663 | -1ULL | ||
664 | } | ||
665 | }, | ||
666 | { "addi", TILE_OPC_ADDI, 0xf /* pipes */, 3 /* num_operands */, | ||
667 | TREG_ZERO, /* implicitly_written_register */ | ||
668 | 1, /* can_bundle */ | ||
669 | { | ||
670 | /* operands */ | ||
671 | { 7, 8, 0 }, | ||
672 | { 9, 10, 1 }, | ||
673 | { 11, 12, 2 }, | ||
674 | { 13, 14, 3 }, | ||
675 | { 0, } | ||
676 | }, | ||
677 | { | ||
678 | /* fixed_bit_masks */ | ||
679 | 0x800000007ff00000ULL, | ||
680 | 0xfff8000000000000ULL, | ||
681 | 0x8000000078000000ULL, | ||
682 | 0xf800000000000000ULL, | ||
683 | 0ULL | ||
684 | }, | ||
685 | { | ||
686 | /* fixed_bit_values */ | ||
687 | 0x0000000040300000ULL, | ||
688 | 0x3018000000000000ULL, | ||
689 | 0x8000000048000000ULL, | ||
690 | 0xb800000000000000ULL, | ||
691 | -1ULL | ||
692 | } | ||
693 | }, | ||
694 | { "addi.sn", TILE_OPC_ADDI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
695 | TREG_SN, /* implicitly_written_register */ | ||
696 | 1, /* can_bundle */ | ||
697 | { | ||
698 | /* operands */ | ||
699 | { 7, 8, 0 }, | ||
700 | { 9, 10, 1 }, | ||
701 | { 0, }, | ||
702 | { 0, }, | ||
703 | { 0, } | ||
704 | }, | ||
705 | { | ||
706 | /* fixed_bit_masks */ | ||
707 | 0x800000007ff00000ULL, | ||
708 | 0xfff8000000000000ULL, | ||
709 | 0ULL, | ||
710 | 0ULL, | ||
711 | 0ULL | ||
712 | }, | ||
713 | { | ||
714 | /* fixed_bit_values */ | ||
715 | 0x0000000048300000ULL, | ||
716 | 0x3418000000000000ULL, | ||
717 | -1ULL, | ||
718 | -1ULL, | ||
719 | -1ULL | ||
720 | } | ||
721 | }, | ||
722 | { "addib", TILE_OPC_ADDIB, 0x3 /* pipes */, 3 /* num_operands */, | ||
723 | TREG_ZERO, /* implicitly_written_register */ | ||
724 | 1, /* can_bundle */ | ||
725 | { | ||
726 | /* operands */ | ||
727 | { 7, 8, 0 }, | ||
728 | { 9, 10, 1 }, | ||
729 | { 0, }, | ||
730 | { 0, }, | ||
731 | { 0, } | ||
732 | }, | ||
733 | { | ||
734 | /* fixed_bit_masks */ | ||
735 | 0x800000007ff00000ULL, | ||
736 | 0xfff8000000000000ULL, | ||
737 | 0ULL, | ||
738 | 0ULL, | ||
739 | 0ULL | ||
740 | }, | ||
741 | { | ||
742 | /* fixed_bit_values */ | ||
743 | 0x0000000040100000ULL, | ||
744 | 0x3008000000000000ULL, | ||
745 | -1ULL, | ||
746 | -1ULL, | ||
747 | -1ULL | ||
748 | } | ||
749 | }, | ||
750 | { "addib.sn", TILE_OPC_ADDIB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
751 | TREG_SN, /* implicitly_written_register */ | ||
752 | 1, /* can_bundle */ | ||
753 | { | ||
754 | /* operands */ | ||
755 | { 7, 8, 0 }, | ||
756 | { 9, 10, 1 }, | ||
757 | { 0, }, | ||
758 | { 0, }, | ||
759 | { 0, } | ||
760 | }, | ||
761 | { | ||
762 | /* fixed_bit_masks */ | ||
763 | 0x800000007ff00000ULL, | ||
764 | 0xfff8000000000000ULL, | ||
765 | 0ULL, | ||
766 | 0ULL, | ||
767 | 0ULL | ||
768 | }, | ||
769 | { | ||
770 | /* fixed_bit_values */ | ||
771 | 0x0000000048100000ULL, | ||
772 | 0x3408000000000000ULL, | ||
773 | -1ULL, | ||
774 | -1ULL, | ||
775 | -1ULL | ||
776 | } | ||
777 | }, | ||
778 | { "addih", TILE_OPC_ADDIH, 0x3 /* pipes */, 3 /* num_operands */, | ||
779 | TREG_ZERO, /* implicitly_written_register */ | ||
780 | 1, /* can_bundle */ | ||
781 | { | ||
782 | /* operands */ | ||
783 | { 7, 8, 0 }, | ||
784 | { 9, 10, 1 }, | ||
785 | { 0, }, | ||
786 | { 0, }, | ||
787 | { 0, } | ||
788 | }, | ||
789 | { | ||
790 | /* fixed_bit_masks */ | ||
791 | 0x800000007ff00000ULL, | ||
792 | 0xfff8000000000000ULL, | ||
793 | 0ULL, | ||
794 | 0ULL, | ||
795 | 0ULL | ||
796 | }, | ||
797 | { | ||
798 | /* fixed_bit_values */ | ||
799 | 0x0000000040200000ULL, | ||
800 | 0x3010000000000000ULL, | ||
801 | -1ULL, | ||
802 | -1ULL, | ||
803 | -1ULL | ||
804 | } | ||
805 | }, | ||
806 | { "addih.sn", TILE_OPC_ADDIH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
807 | TREG_SN, /* implicitly_written_register */ | ||
808 | 1, /* can_bundle */ | ||
809 | { | ||
810 | /* operands */ | ||
811 | { 7, 8, 0 }, | ||
812 | { 9, 10, 1 }, | ||
813 | { 0, }, | ||
814 | { 0, }, | ||
815 | { 0, } | ||
816 | }, | ||
817 | { | ||
818 | /* fixed_bit_masks */ | ||
819 | 0x800000007ff00000ULL, | ||
820 | 0xfff8000000000000ULL, | ||
821 | 0ULL, | ||
822 | 0ULL, | ||
823 | 0ULL | ||
824 | }, | ||
825 | { | ||
826 | /* fixed_bit_values */ | ||
827 | 0x0000000048200000ULL, | ||
828 | 0x3410000000000000ULL, | ||
829 | -1ULL, | ||
830 | -1ULL, | ||
831 | -1ULL | ||
832 | } | ||
833 | }, | ||
834 | { "addli", TILE_OPC_ADDLI, 0x3 /* pipes */, 3 /* num_operands */, | ||
835 | TREG_ZERO, /* implicitly_written_register */ | ||
836 | 1, /* can_bundle */ | ||
837 | { | ||
838 | /* operands */ | ||
839 | { 7, 8, 4 }, | ||
840 | { 9, 10, 5 }, | ||
841 | { 0, }, | ||
842 | { 0, }, | ||
843 | { 0, } | ||
844 | }, | ||
845 | { | ||
846 | /* fixed_bit_masks */ | ||
847 | 0x8000000070000000ULL, | ||
848 | 0xf800000000000000ULL, | ||
849 | 0ULL, | ||
850 | 0ULL, | ||
851 | 0ULL | ||
852 | }, | ||
853 | { | ||
854 | /* fixed_bit_values */ | ||
855 | 0x0000000020000000ULL, | ||
856 | 0x1800000000000000ULL, | ||
857 | -1ULL, | ||
858 | -1ULL, | ||
859 | -1ULL | ||
860 | } | ||
861 | }, | ||
862 | { "addli.sn", TILE_OPC_ADDLI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
863 | TREG_SN, /* implicitly_written_register */ | ||
864 | 1, /* can_bundle */ | ||
865 | { | ||
866 | /* operands */ | ||
867 | { 7, 8, 4 }, | ||
868 | { 9, 10, 5 }, | ||
869 | { 0, }, | ||
870 | { 0, }, | ||
871 | { 0, } | ||
872 | }, | ||
873 | { | ||
874 | /* fixed_bit_masks */ | ||
875 | 0x8000000070000000ULL, | ||
876 | 0xf800000000000000ULL, | ||
877 | 0ULL, | ||
878 | 0ULL, | ||
879 | 0ULL | ||
880 | }, | ||
881 | { | ||
882 | /* fixed_bit_values */ | ||
883 | 0x0000000010000000ULL, | ||
884 | 0x1000000000000000ULL, | ||
885 | -1ULL, | ||
886 | -1ULL, | ||
887 | -1ULL | ||
888 | } | ||
889 | }, | ||
890 | { "addlis", TILE_OPC_ADDLIS, 0x3 /* pipes */, 3 /* num_operands */, | ||
891 | TREG_SN, /* implicitly_written_register */ | ||
892 | 1, /* can_bundle */ | ||
893 | { | ||
894 | /* operands */ | ||
895 | { 7, 8, 4 }, | ||
896 | { 9, 10, 5 }, | ||
897 | { 0, }, | ||
898 | { 0, }, | ||
899 | { 0, } | ||
900 | }, | ||
901 | { | ||
902 | /* fixed_bit_masks */ | ||
903 | 0x8000000070000000ULL, | ||
904 | 0xf800000000000000ULL, | ||
905 | 0ULL, | ||
906 | 0ULL, | ||
907 | 0ULL | ||
908 | }, | ||
909 | { | ||
910 | /* fixed_bit_values */ | ||
911 | 0x0000000010000000ULL, | ||
912 | 0x1000000000000000ULL, | ||
913 | -1ULL, | ||
914 | -1ULL, | ||
915 | -1ULL | ||
916 | } | ||
917 | }, | ||
918 | { "adds", TILE_OPC_ADDS, 0x3 /* pipes */, 3 /* num_operands */, | ||
919 | TREG_ZERO, /* implicitly_written_register */ | ||
920 | 1, /* can_bundle */ | ||
921 | { | ||
922 | /* operands */ | ||
923 | { 7, 8, 16 }, | ||
924 | { 9, 10, 17 }, | ||
925 | { 0, }, | ||
926 | { 0, }, | ||
927 | { 0, } | ||
928 | }, | ||
929 | { | ||
930 | /* fixed_bit_masks */ | ||
931 | 0x800000007ffc0000ULL, | ||
932 | 0xfffe000000000000ULL, | ||
933 | 0ULL, | ||
934 | 0ULL, | ||
935 | 0ULL | ||
936 | }, | ||
937 | { | ||
938 | /* fixed_bit_values */ | ||
939 | 0x0000000001800000ULL, | ||
940 | 0x0884000000000000ULL, | ||
941 | -1ULL, | ||
942 | -1ULL, | ||
943 | -1ULL | ||
944 | } | ||
945 | }, | ||
946 | { "adds.sn", TILE_OPC_ADDS_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
947 | TREG_SN, /* implicitly_written_register */ | ||
948 | 1, /* can_bundle */ | ||
949 | { | ||
950 | /* operands */ | ||
951 | { 7, 8, 16 }, | ||
952 | { 9, 10, 17 }, | ||
953 | { 0, }, | ||
954 | { 0, }, | ||
955 | { 0, } | ||
956 | }, | ||
957 | { | ||
958 | /* fixed_bit_masks */ | ||
959 | 0x800000007ffc0000ULL, | ||
960 | 0xfffe000000000000ULL, | ||
961 | 0ULL, | ||
962 | 0ULL, | ||
963 | 0ULL | ||
964 | }, | ||
965 | { | ||
966 | /* fixed_bit_values */ | ||
967 | 0x0000000009800000ULL, | ||
968 | 0x0c84000000000000ULL, | ||
969 | -1ULL, | ||
970 | -1ULL, | ||
971 | -1ULL | ||
972 | } | ||
973 | }, | ||
974 | { "adiffb_u", TILE_OPC_ADIFFB_U, 0x1 /* pipes */, 3 /* num_operands */, | ||
975 | TREG_ZERO, /* implicitly_written_register */ | ||
976 | 1, /* can_bundle */ | ||
977 | { | ||
978 | /* operands */ | ||
979 | { 7, 8, 16 }, | ||
980 | { 0, }, | ||
981 | { 0, }, | ||
982 | { 0, }, | ||
983 | { 0, } | ||
984 | }, | ||
985 | { | ||
986 | /* fixed_bit_masks */ | ||
987 | 0x800000007ffc0000ULL, | ||
988 | 0ULL, | ||
989 | 0ULL, | ||
990 | 0ULL, | ||
991 | 0ULL | ||
992 | }, | ||
993 | { | ||
994 | /* fixed_bit_values */ | ||
995 | 0x0000000000100000ULL, | ||
996 | -1ULL, | ||
997 | -1ULL, | ||
998 | -1ULL, | ||
999 | -1ULL | ||
1000 | } | ||
1001 | }, | ||
1002 | { "adiffb_u.sn", TILE_OPC_ADIFFB_U_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
1003 | TREG_SN, /* implicitly_written_register */ | ||
1004 | 1, /* can_bundle */ | ||
1005 | { | ||
1006 | /* operands */ | ||
1007 | { 7, 8, 16 }, | ||
1008 | { 0, }, | ||
1009 | { 0, }, | ||
1010 | { 0, }, | ||
1011 | { 0, } | ||
1012 | }, | ||
1013 | { | ||
1014 | /* fixed_bit_masks */ | ||
1015 | 0x800000007ffc0000ULL, | ||
1016 | 0ULL, | ||
1017 | 0ULL, | ||
1018 | 0ULL, | ||
1019 | 0ULL | ||
1020 | }, | ||
1021 | { | ||
1022 | /* fixed_bit_values */ | ||
1023 | 0x0000000008100000ULL, | ||
1024 | -1ULL, | ||
1025 | -1ULL, | ||
1026 | -1ULL, | ||
1027 | -1ULL | ||
1028 | } | ||
1029 | }, | ||
1030 | { "adiffh", TILE_OPC_ADIFFH, 0x1 /* pipes */, 3 /* num_operands */, | ||
1031 | TREG_ZERO, /* implicitly_written_register */ | ||
1032 | 1, /* can_bundle */ | ||
1033 | { | ||
1034 | /* operands */ | ||
1035 | { 7, 8, 16 }, | ||
1036 | { 0, }, | ||
1037 | { 0, }, | ||
1038 | { 0, }, | ||
1039 | { 0, } | ||
1040 | }, | ||
1041 | { | ||
1042 | /* fixed_bit_masks */ | ||
1043 | 0x800000007ffc0000ULL, | ||
1044 | 0ULL, | ||
1045 | 0ULL, | ||
1046 | 0ULL, | ||
1047 | 0ULL | ||
1048 | }, | ||
1049 | { | ||
1050 | /* fixed_bit_values */ | ||
1051 | 0x0000000000140000ULL, | ||
1052 | -1ULL, | ||
1053 | -1ULL, | ||
1054 | -1ULL, | ||
1055 | -1ULL | ||
1056 | } | ||
1057 | }, | ||
1058 | { "adiffh.sn", TILE_OPC_ADIFFH_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
1059 | TREG_SN, /* implicitly_written_register */ | ||
1060 | 1, /* can_bundle */ | ||
1061 | { | ||
1062 | /* operands */ | ||
1063 | { 7, 8, 16 }, | ||
1064 | { 0, }, | ||
1065 | { 0, }, | ||
1066 | { 0, }, | ||
1067 | { 0, } | ||
1068 | }, | ||
1069 | { | ||
1070 | /* fixed_bit_masks */ | ||
1071 | 0x800000007ffc0000ULL, | ||
1072 | 0ULL, | ||
1073 | 0ULL, | ||
1074 | 0ULL, | ||
1075 | 0ULL | ||
1076 | }, | ||
1077 | { | ||
1078 | /* fixed_bit_values */ | ||
1079 | 0x0000000008140000ULL, | ||
1080 | -1ULL, | ||
1081 | -1ULL, | ||
1082 | -1ULL, | ||
1083 | -1ULL | ||
1084 | } | ||
1085 | }, | ||
1086 | { "and", TILE_OPC_AND, 0xf /* pipes */, 3 /* num_operands */, | ||
1087 | TREG_ZERO, /* implicitly_written_register */ | ||
1088 | 1, /* can_bundle */ | ||
1089 | { | ||
1090 | /* operands */ | ||
1091 | { 7, 8, 16 }, | ||
1092 | { 9, 10, 17 }, | ||
1093 | { 11, 12, 18 }, | ||
1094 | { 13, 14, 19 }, | ||
1095 | { 0, } | ||
1096 | }, | ||
1097 | { | ||
1098 | /* fixed_bit_masks */ | ||
1099 | 0x800000007ffc0000ULL, | ||
1100 | 0xfffe000000000000ULL, | ||
1101 | 0x80000000780c0000ULL, | ||
1102 | 0xf806000000000000ULL, | ||
1103 | 0ULL | ||
1104 | }, | ||
1105 | { | ||
1106 | /* fixed_bit_values */ | ||
1107 | 0x0000000000180000ULL, | ||
1108 | 0x0808000000000000ULL, | ||
1109 | 0x8000000018000000ULL, | ||
1110 | 0x9800000000000000ULL, | ||
1111 | -1ULL | ||
1112 | } | ||
1113 | }, | ||
1114 | { "and.sn", TILE_OPC_AND_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
1115 | TREG_SN, /* implicitly_written_register */ | ||
1116 | 1, /* can_bundle */ | ||
1117 | { | ||
1118 | /* operands */ | ||
1119 | { 7, 8, 16 }, | ||
1120 | { 9, 10, 17 }, | ||
1121 | { 0, }, | ||
1122 | { 0, }, | ||
1123 | { 0, } | ||
1124 | }, | ||
1125 | { | ||
1126 | /* fixed_bit_masks */ | ||
1127 | 0x800000007ffc0000ULL, | ||
1128 | 0xfffe000000000000ULL, | ||
1129 | 0ULL, | ||
1130 | 0ULL, | ||
1131 | 0ULL | ||
1132 | }, | ||
1133 | { | ||
1134 | /* fixed_bit_values */ | ||
1135 | 0x0000000008180000ULL, | ||
1136 | 0x0c08000000000000ULL, | ||
1137 | -1ULL, | ||
1138 | -1ULL, | ||
1139 | -1ULL | ||
1140 | } | ||
1141 | }, | ||
1142 | { "andi", TILE_OPC_ANDI, 0xf /* pipes */, 3 /* num_operands */, | ||
1143 | TREG_ZERO, /* implicitly_written_register */ | ||
1144 | 1, /* can_bundle */ | ||
1145 | { | ||
1146 | /* operands */ | ||
1147 | { 7, 8, 0 }, | ||
1148 | { 9, 10, 1 }, | ||
1149 | { 11, 12, 2 }, | ||
1150 | { 13, 14, 3 }, | ||
1151 | { 0, } | ||
1152 | }, | ||
1153 | { | ||
1154 | /* fixed_bit_masks */ | ||
1155 | 0x800000007ff00000ULL, | ||
1156 | 0xfff8000000000000ULL, | ||
1157 | 0x8000000078000000ULL, | ||
1158 | 0xf800000000000000ULL, | ||
1159 | 0ULL | ||
1160 | }, | ||
1161 | { | ||
1162 | /* fixed_bit_values */ | ||
1163 | 0x0000000050100000ULL, | ||
1164 | 0x3020000000000000ULL, | ||
1165 | 0x8000000050000000ULL, | ||
1166 | 0xc000000000000000ULL, | ||
1167 | -1ULL | ||
1168 | } | ||
1169 | }, | ||
1170 | { "andi.sn", TILE_OPC_ANDI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
1171 | TREG_SN, /* implicitly_written_register */ | ||
1172 | 1, /* can_bundle */ | ||
1173 | { | ||
1174 | /* operands */ | ||
1175 | { 7, 8, 0 }, | ||
1176 | { 9, 10, 1 }, | ||
1177 | { 0, }, | ||
1178 | { 0, }, | ||
1179 | { 0, } | ||
1180 | }, | ||
1181 | { | ||
1182 | /* fixed_bit_masks */ | ||
1183 | 0x800000007ff00000ULL, | ||
1184 | 0xfff8000000000000ULL, | ||
1185 | 0ULL, | ||
1186 | 0ULL, | ||
1187 | 0ULL | ||
1188 | }, | ||
1189 | { | ||
1190 | /* fixed_bit_values */ | ||
1191 | 0x0000000058100000ULL, | ||
1192 | 0x3420000000000000ULL, | ||
1193 | -1ULL, | ||
1194 | -1ULL, | ||
1195 | -1ULL | ||
1196 | } | ||
1197 | }, | ||
1198 | { "auli", TILE_OPC_AULI, 0x3 /* pipes */, 3 /* num_operands */, | ||
1199 | TREG_ZERO, /* implicitly_written_register */ | ||
1200 | 1, /* can_bundle */ | ||
1201 | { | ||
1202 | /* operands */ | ||
1203 | { 7, 8, 4 }, | ||
1204 | { 9, 10, 5 }, | ||
1205 | { 0, }, | ||
1206 | { 0, }, | ||
1207 | { 0, } | ||
1208 | }, | ||
1209 | { | ||
1210 | /* fixed_bit_masks */ | ||
1211 | 0x8000000070000000ULL, | ||
1212 | 0xf800000000000000ULL, | ||
1213 | 0ULL, | ||
1214 | 0ULL, | ||
1215 | 0ULL | ||
1216 | }, | ||
1217 | { | ||
1218 | /* fixed_bit_values */ | ||
1219 | 0x0000000030000000ULL, | ||
1220 | 0x2000000000000000ULL, | ||
1221 | -1ULL, | ||
1222 | -1ULL, | ||
1223 | -1ULL | ||
1224 | } | ||
1225 | }, | ||
1226 | { "avgb_u", TILE_OPC_AVGB_U, 0x1 /* pipes */, 3 /* num_operands */, | ||
1227 | TREG_ZERO, /* implicitly_written_register */ | ||
1228 | 1, /* can_bundle */ | ||
1229 | { | ||
1230 | /* operands */ | ||
1231 | { 7, 8, 16 }, | ||
1232 | { 0, }, | ||
1233 | { 0, }, | ||
1234 | { 0, }, | ||
1235 | { 0, } | ||
1236 | }, | ||
1237 | { | ||
1238 | /* fixed_bit_masks */ | ||
1239 | 0x800000007ffc0000ULL, | ||
1240 | 0ULL, | ||
1241 | 0ULL, | ||
1242 | 0ULL, | ||
1243 | 0ULL | ||
1244 | }, | ||
1245 | { | ||
1246 | /* fixed_bit_values */ | ||
1247 | 0x00000000001c0000ULL, | ||
1248 | -1ULL, | ||
1249 | -1ULL, | ||
1250 | -1ULL, | ||
1251 | -1ULL | ||
1252 | } | ||
1253 | }, | ||
1254 | { "avgb_u.sn", TILE_OPC_AVGB_U_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
1255 | TREG_SN, /* implicitly_written_register */ | ||
1256 | 1, /* can_bundle */ | ||
1257 | { | ||
1258 | /* operands */ | ||
1259 | { 7, 8, 16 }, | ||
1260 | { 0, }, | ||
1261 | { 0, }, | ||
1262 | { 0, }, | ||
1263 | { 0, } | ||
1264 | }, | ||
1265 | { | ||
1266 | /* fixed_bit_masks */ | ||
1267 | 0x800000007ffc0000ULL, | ||
1268 | 0ULL, | ||
1269 | 0ULL, | ||
1270 | 0ULL, | ||
1271 | 0ULL | ||
1272 | }, | ||
1273 | { | ||
1274 | /* fixed_bit_values */ | ||
1275 | 0x00000000081c0000ULL, | ||
1276 | -1ULL, | ||
1277 | -1ULL, | ||
1278 | -1ULL, | ||
1279 | -1ULL | ||
1280 | } | ||
1281 | }, | ||
1282 | { "avgh", TILE_OPC_AVGH, 0x1 /* pipes */, 3 /* num_operands */, | ||
1283 | TREG_ZERO, /* implicitly_written_register */ | ||
1284 | 1, /* can_bundle */ | ||
1285 | { | ||
1286 | /* operands */ | ||
1287 | { 7, 8, 16 }, | ||
1288 | { 0, }, | ||
1289 | { 0, }, | ||
1290 | { 0, }, | ||
1291 | { 0, } | ||
1292 | }, | ||
1293 | { | ||
1294 | /* fixed_bit_masks */ | ||
1295 | 0x800000007ffc0000ULL, | ||
1296 | 0ULL, | ||
1297 | 0ULL, | ||
1298 | 0ULL, | ||
1299 | 0ULL | ||
1300 | }, | ||
1301 | { | ||
1302 | /* fixed_bit_values */ | ||
1303 | 0x0000000000200000ULL, | ||
1304 | -1ULL, | ||
1305 | -1ULL, | ||
1306 | -1ULL, | ||
1307 | -1ULL | ||
1308 | } | ||
1309 | }, | ||
1310 | { "avgh.sn", TILE_OPC_AVGH_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
1311 | TREG_SN, /* implicitly_written_register */ | ||
1312 | 1, /* can_bundle */ | ||
1313 | { | ||
1314 | /* operands */ | ||
1315 | { 7, 8, 16 }, | ||
1316 | { 0, }, | ||
1317 | { 0, }, | ||
1318 | { 0, }, | ||
1319 | { 0, } | ||
1320 | }, | ||
1321 | { | ||
1322 | /* fixed_bit_masks */ | ||
1323 | 0x800000007ffc0000ULL, | ||
1324 | 0ULL, | ||
1325 | 0ULL, | ||
1326 | 0ULL, | ||
1327 | 0ULL | ||
1328 | }, | ||
1329 | { | ||
1330 | /* fixed_bit_values */ | ||
1331 | 0x0000000008200000ULL, | ||
1332 | -1ULL, | ||
1333 | -1ULL, | ||
1334 | -1ULL, | ||
1335 | -1ULL | ||
1336 | } | ||
1337 | }, | ||
1338 | { "bbns", TILE_OPC_BBNS, 0x2 /* pipes */, 2 /* num_operands */, | ||
1339 | TREG_ZERO, /* implicitly_written_register */ | ||
1340 | 1, /* can_bundle */ | ||
1341 | { | ||
1342 | /* operands */ | ||
1343 | { 0, }, | ||
1344 | { 10, 20 }, | ||
1345 | { 0, }, | ||
1346 | { 0, }, | ||
1347 | { 0, } | ||
1348 | }, | ||
1349 | { | ||
1350 | /* fixed_bit_masks */ | ||
1351 | 0ULL, | ||
1352 | 0xfc00000780000000ULL, | ||
1353 | 0ULL, | ||
1354 | 0ULL, | ||
1355 | 0ULL | ||
1356 | }, | ||
1357 | { | ||
1358 | /* fixed_bit_values */ | ||
1359 | -1ULL, | ||
1360 | 0x2800000700000000ULL, | ||
1361 | -1ULL, | ||
1362 | -1ULL, | ||
1363 | -1ULL | ||
1364 | } | ||
1365 | }, | ||
1366 | { "bbns.sn", TILE_OPC_BBNS_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1367 | TREG_SN, /* implicitly_written_register */ | ||
1368 | 1, /* can_bundle */ | ||
1369 | { | ||
1370 | /* operands */ | ||
1371 | { 0, }, | ||
1372 | { 10, 20 }, | ||
1373 | { 0, }, | ||
1374 | { 0, }, | ||
1375 | { 0, } | ||
1376 | }, | ||
1377 | { | ||
1378 | /* fixed_bit_masks */ | ||
1379 | 0ULL, | ||
1380 | 0xfc00000780000000ULL, | ||
1381 | 0ULL, | ||
1382 | 0ULL, | ||
1383 | 0ULL | ||
1384 | }, | ||
1385 | { | ||
1386 | /* fixed_bit_values */ | ||
1387 | -1ULL, | ||
1388 | 0x2c00000700000000ULL, | ||
1389 | -1ULL, | ||
1390 | -1ULL, | ||
1391 | -1ULL | ||
1392 | } | ||
1393 | }, | ||
1394 | { "bbnst", TILE_OPC_BBNST, 0x2 /* pipes */, 2 /* num_operands */, | ||
1395 | TREG_ZERO, /* implicitly_written_register */ | ||
1396 | 1, /* can_bundle */ | ||
1397 | { | ||
1398 | /* operands */ | ||
1399 | { 0, }, | ||
1400 | { 10, 20 }, | ||
1401 | { 0, }, | ||
1402 | { 0, }, | ||
1403 | { 0, } | ||
1404 | }, | ||
1405 | { | ||
1406 | /* fixed_bit_masks */ | ||
1407 | 0ULL, | ||
1408 | 0xfc00000780000000ULL, | ||
1409 | 0ULL, | ||
1410 | 0ULL, | ||
1411 | 0ULL | ||
1412 | }, | ||
1413 | { | ||
1414 | /* fixed_bit_values */ | ||
1415 | -1ULL, | ||
1416 | 0x2800000780000000ULL, | ||
1417 | -1ULL, | ||
1418 | -1ULL, | ||
1419 | -1ULL | ||
1420 | } | ||
1421 | }, | ||
1422 | { "bbnst.sn", TILE_OPC_BBNST_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1423 | TREG_SN, /* implicitly_written_register */ | ||
1424 | 1, /* can_bundle */ | ||
1425 | { | ||
1426 | /* operands */ | ||
1427 | { 0, }, | ||
1428 | { 10, 20 }, | ||
1429 | { 0, }, | ||
1430 | { 0, }, | ||
1431 | { 0, } | ||
1432 | }, | ||
1433 | { | ||
1434 | /* fixed_bit_masks */ | ||
1435 | 0ULL, | ||
1436 | 0xfc00000780000000ULL, | ||
1437 | 0ULL, | ||
1438 | 0ULL, | ||
1439 | 0ULL | ||
1440 | }, | ||
1441 | { | ||
1442 | /* fixed_bit_values */ | ||
1443 | -1ULL, | ||
1444 | 0x2c00000780000000ULL, | ||
1445 | -1ULL, | ||
1446 | -1ULL, | ||
1447 | -1ULL | ||
1448 | } | ||
1449 | }, | ||
1450 | { "bbs", TILE_OPC_BBS, 0x2 /* pipes */, 2 /* num_operands */, | ||
1451 | TREG_ZERO, /* implicitly_written_register */ | ||
1452 | 1, /* can_bundle */ | ||
1453 | { | ||
1454 | /* operands */ | ||
1455 | { 0, }, | ||
1456 | { 10, 20 }, | ||
1457 | { 0, }, | ||
1458 | { 0, }, | ||
1459 | { 0, } | ||
1460 | }, | ||
1461 | { | ||
1462 | /* fixed_bit_masks */ | ||
1463 | 0ULL, | ||
1464 | 0xfc00000780000000ULL, | ||
1465 | 0ULL, | ||
1466 | 0ULL, | ||
1467 | 0ULL | ||
1468 | }, | ||
1469 | { | ||
1470 | /* fixed_bit_values */ | ||
1471 | -1ULL, | ||
1472 | 0x2800000600000000ULL, | ||
1473 | -1ULL, | ||
1474 | -1ULL, | ||
1475 | -1ULL | ||
1476 | } | ||
1477 | }, | ||
1478 | { "bbs.sn", TILE_OPC_BBS_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1479 | TREG_SN, /* implicitly_written_register */ | ||
1480 | 1, /* can_bundle */ | ||
1481 | { | ||
1482 | /* operands */ | ||
1483 | { 0, }, | ||
1484 | { 10, 20 }, | ||
1485 | { 0, }, | ||
1486 | { 0, }, | ||
1487 | { 0, } | ||
1488 | }, | ||
1489 | { | ||
1490 | /* fixed_bit_masks */ | ||
1491 | 0ULL, | ||
1492 | 0xfc00000780000000ULL, | ||
1493 | 0ULL, | ||
1494 | 0ULL, | ||
1495 | 0ULL | ||
1496 | }, | ||
1497 | { | ||
1498 | /* fixed_bit_values */ | ||
1499 | -1ULL, | ||
1500 | 0x2c00000600000000ULL, | ||
1501 | -1ULL, | ||
1502 | -1ULL, | ||
1503 | -1ULL | ||
1504 | } | ||
1505 | }, | ||
1506 | { "bbst", TILE_OPC_BBST, 0x2 /* pipes */, 2 /* num_operands */, | ||
1507 | TREG_ZERO, /* implicitly_written_register */ | ||
1508 | 1, /* can_bundle */ | ||
1509 | { | ||
1510 | /* operands */ | ||
1511 | { 0, }, | ||
1512 | { 10, 20 }, | ||
1513 | { 0, }, | ||
1514 | { 0, }, | ||
1515 | { 0, } | ||
1516 | }, | ||
1517 | { | ||
1518 | /* fixed_bit_masks */ | ||
1519 | 0ULL, | ||
1520 | 0xfc00000780000000ULL, | ||
1521 | 0ULL, | ||
1522 | 0ULL, | ||
1523 | 0ULL | ||
1524 | }, | ||
1525 | { | ||
1526 | /* fixed_bit_values */ | ||
1527 | -1ULL, | ||
1528 | 0x2800000680000000ULL, | ||
1529 | -1ULL, | ||
1530 | -1ULL, | ||
1531 | -1ULL | ||
1532 | } | ||
1533 | }, | ||
1534 | { "bbst.sn", TILE_OPC_BBST_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1535 | TREG_SN, /* implicitly_written_register */ | ||
1536 | 1, /* can_bundle */ | ||
1537 | { | ||
1538 | /* operands */ | ||
1539 | { 0, }, | ||
1540 | { 10, 20 }, | ||
1541 | { 0, }, | ||
1542 | { 0, }, | ||
1543 | { 0, } | ||
1544 | }, | ||
1545 | { | ||
1546 | /* fixed_bit_masks */ | ||
1547 | 0ULL, | ||
1548 | 0xfc00000780000000ULL, | ||
1549 | 0ULL, | ||
1550 | 0ULL, | ||
1551 | 0ULL | ||
1552 | }, | ||
1553 | { | ||
1554 | /* fixed_bit_values */ | ||
1555 | -1ULL, | ||
1556 | 0x2c00000680000000ULL, | ||
1557 | -1ULL, | ||
1558 | -1ULL, | ||
1559 | -1ULL | ||
1560 | } | ||
1561 | }, | ||
1562 | { "bgez", TILE_OPC_BGEZ, 0x2 /* pipes */, 2 /* num_operands */, | ||
1563 | TREG_ZERO, /* implicitly_written_register */ | ||
1564 | 1, /* can_bundle */ | ||
1565 | { | ||
1566 | /* operands */ | ||
1567 | { 0, }, | ||
1568 | { 10, 20 }, | ||
1569 | { 0, }, | ||
1570 | { 0, }, | ||
1571 | { 0, } | ||
1572 | }, | ||
1573 | { | ||
1574 | /* fixed_bit_masks */ | ||
1575 | 0ULL, | ||
1576 | 0xfc00000780000000ULL, | ||
1577 | 0ULL, | ||
1578 | 0ULL, | ||
1579 | 0ULL | ||
1580 | }, | ||
1581 | { | ||
1582 | /* fixed_bit_values */ | ||
1583 | -1ULL, | ||
1584 | 0x2800000300000000ULL, | ||
1585 | -1ULL, | ||
1586 | -1ULL, | ||
1587 | -1ULL | ||
1588 | } | ||
1589 | }, | ||
1590 | { "bgez.sn", TILE_OPC_BGEZ_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1591 | TREG_SN, /* implicitly_written_register */ | ||
1592 | 1, /* can_bundle */ | ||
1593 | { | ||
1594 | /* operands */ | ||
1595 | { 0, }, | ||
1596 | { 10, 20 }, | ||
1597 | { 0, }, | ||
1598 | { 0, }, | ||
1599 | { 0, } | ||
1600 | }, | ||
1601 | { | ||
1602 | /* fixed_bit_masks */ | ||
1603 | 0ULL, | ||
1604 | 0xfc00000780000000ULL, | ||
1605 | 0ULL, | ||
1606 | 0ULL, | ||
1607 | 0ULL | ||
1608 | }, | ||
1609 | { | ||
1610 | /* fixed_bit_values */ | ||
1611 | -1ULL, | ||
1612 | 0x2c00000300000000ULL, | ||
1613 | -1ULL, | ||
1614 | -1ULL, | ||
1615 | -1ULL | ||
1616 | } | ||
1617 | }, | ||
1618 | { "bgezt", TILE_OPC_BGEZT, 0x2 /* pipes */, 2 /* num_operands */, | ||
1619 | TREG_ZERO, /* implicitly_written_register */ | ||
1620 | 1, /* can_bundle */ | ||
1621 | { | ||
1622 | /* operands */ | ||
1623 | { 0, }, | ||
1624 | { 10, 20 }, | ||
1625 | { 0, }, | ||
1626 | { 0, }, | ||
1627 | { 0, } | ||
1628 | }, | ||
1629 | { | ||
1630 | /* fixed_bit_masks */ | ||
1631 | 0ULL, | ||
1632 | 0xfc00000780000000ULL, | ||
1633 | 0ULL, | ||
1634 | 0ULL, | ||
1635 | 0ULL | ||
1636 | }, | ||
1637 | { | ||
1638 | /* fixed_bit_values */ | ||
1639 | -1ULL, | ||
1640 | 0x2800000380000000ULL, | ||
1641 | -1ULL, | ||
1642 | -1ULL, | ||
1643 | -1ULL | ||
1644 | } | ||
1645 | }, | ||
1646 | { "bgezt.sn", TILE_OPC_BGEZT_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1647 | TREG_SN, /* implicitly_written_register */ | ||
1648 | 1, /* can_bundle */ | ||
1649 | { | ||
1650 | /* operands */ | ||
1651 | { 0, }, | ||
1652 | { 10, 20 }, | ||
1653 | { 0, }, | ||
1654 | { 0, }, | ||
1655 | { 0, } | ||
1656 | }, | ||
1657 | { | ||
1658 | /* fixed_bit_masks */ | ||
1659 | 0ULL, | ||
1660 | 0xfc00000780000000ULL, | ||
1661 | 0ULL, | ||
1662 | 0ULL, | ||
1663 | 0ULL | ||
1664 | }, | ||
1665 | { | ||
1666 | /* fixed_bit_values */ | ||
1667 | -1ULL, | ||
1668 | 0x2c00000380000000ULL, | ||
1669 | -1ULL, | ||
1670 | -1ULL, | ||
1671 | -1ULL | ||
1672 | } | ||
1673 | }, | ||
1674 | { "bgz", TILE_OPC_BGZ, 0x2 /* pipes */, 2 /* num_operands */, | ||
1675 | TREG_ZERO, /* implicitly_written_register */ | ||
1676 | 1, /* can_bundle */ | ||
1677 | { | ||
1678 | /* operands */ | ||
1679 | { 0, }, | ||
1680 | { 10, 20 }, | ||
1681 | { 0, }, | ||
1682 | { 0, }, | ||
1683 | { 0, } | ||
1684 | }, | ||
1685 | { | ||
1686 | /* fixed_bit_masks */ | ||
1687 | 0ULL, | ||
1688 | 0xfc00000780000000ULL, | ||
1689 | 0ULL, | ||
1690 | 0ULL, | ||
1691 | 0ULL | ||
1692 | }, | ||
1693 | { | ||
1694 | /* fixed_bit_values */ | ||
1695 | -1ULL, | ||
1696 | 0x2800000200000000ULL, | ||
1697 | -1ULL, | ||
1698 | -1ULL, | ||
1699 | -1ULL | ||
1700 | } | ||
1701 | }, | ||
1702 | { "bgz.sn", TILE_OPC_BGZ_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1703 | TREG_SN, /* implicitly_written_register */ | ||
1704 | 1, /* can_bundle */ | ||
1705 | { | ||
1706 | /* operands */ | ||
1707 | { 0, }, | ||
1708 | { 10, 20 }, | ||
1709 | { 0, }, | ||
1710 | { 0, }, | ||
1711 | { 0, } | ||
1712 | }, | ||
1713 | { | ||
1714 | /* fixed_bit_masks */ | ||
1715 | 0ULL, | ||
1716 | 0xfc00000780000000ULL, | ||
1717 | 0ULL, | ||
1718 | 0ULL, | ||
1719 | 0ULL | ||
1720 | }, | ||
1721 | { | ||
1722 | /* fixed_bit_values */ | ||
1723 | -1ULL, | ||
1724 | 0x2c00000200000000ULL, | ||
1725 | -1ULL, | ||
1726 | -1ULL, | ||
1727 | -1ULL | ||
1728 | } | ||
1729 | }, | ||
1730 | { "bgzt", TILE_OPC_BGZT, 0x2 /* pipes */, 2 /* num_operands */, | ||
1731 | TREG_ZERO, /* implicitly_written_register */ | ||
1732 | 1, /* can_bundle */ | ||
1733 | { | ||
1734 | /* operands */ | ||
1735 | { 0, }, | ||
1736 | { 10, 20 }, | ||
1737 | { 0, }, | ||
1738 | { 0, }, | ||
1739 | { 0, } | ||
1740 | }, | ||
1741 | { | ||
1742 | /* fixed_bit_masks */ | ||
1743 | 0ULL, | ||
1744 | 0xfc00000780000000ULL, | ||
1745 | 0ULL, | ||
1746 | 0ULL, | ||
1747 | 0ULL | ||
1748 | }, | ||
1749 | { | ||
1750 | /* fixed_bit_values */ | ||
1751 | -1ULL, | ||
1752 | 0x2800000280000000ULL, | ||
1753 | -1ULL, | ||
1754 | -1ULL, | ||
1755 | -1ULL | ||
1756 | } | ||
1757 | }, | ||
1758 | { "bgzt.sn", TILE_OPC_BGZT_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1759 | TREG_SN, /* implicitly_written_register */ | ||
1760 | 1, /* can_bundle */ | ||
1761 | { | ||
1762 | /* operands */ | ||
1763 | { 0, }, | ||
1764 | { 10, 20 }, | ||
1765 | { 0, }, | ||
1766 | { 0, }, | ||
1767 | { 0, } | ||
1768 | }, | ||
1769 | { | ||
1770 | /* fixed_bit_masks */ | ||
1771 | 0ULL, | ||
1772 | 0xfc00000780000000ULL, | ||
1773 | 0ULL, | ||
1774 | 0ULL, | ||
1775 | 0ULL | ||
1776 | }, | ||
1777 | { | ||
1778 | /* fixed_bit_values */ | ||
1779 | -1ULL, | ||
1780 | 0x2c00000280000000ULL, | ||
1781 | -1ULL, | ||
1782 | -1ULL, | ||
1783 | -1ULL | ||
1784 | } | ||
1785 | }, | ||
1786 | { "bitx", TILE_OPC_BITX, 0x5 /* pipes */, 2 /* num_operands */, | ||
1787 | TREG_ZERO, /* implicitly_written_register */ | ||
1788 | 1, /* can_bundle */ | ||
1789 | { | ||
1790 | /* operands */ | ||
1791 | { 7, 8 }, | ||
1792 | { 0, }, | ||
1793 | { 11, 12 }, | ||
1794 | { 0, }, | ||
1795 | { 0, } | ||
1796 | }, | ||
1797 | { | ||
1798 | /* fixed_bit_masks */ | ||
1799 | 0x800000007ffff000ULL, | ||
1800 | 0ULL, | ||
1801 | 0x80000000780ff000ULL, | ||
1802 | 0ULL, | ||
1803 | 0ULL | ||
1804 | }, | ||
1805 | { | ||
1806 | /* fixed_bit_values */ | ||
1807 | 0x0000000070161000ULL, | ||
1808 | -1ULL, | ||
1809 | 0x80000000680a1000ULL, | ||
1810 | -1ULL, | ||
1811 | -1ULL | ||
1812 | } | ||
1813 | }, | ||
1814 | { "bitx.sn", TILE_OPC_BITX_SN, 0x1 /* pipes */, 2 /* num_operands */, | ||
1815 | TREG_SN, /* implicitly_written_register */ | ||
1816 | 1, /* can_bundle */ | ||
1817 | { | ||
1818 | /* operands */ | ||
1819 | { 7, 8 }, | ||
1820 | { 0, }, | ||
1821 | { 0, }, | ||
1822 | { 0, }, | ||
1823 | { 0, } | ||
1824 | }, | ||
1825 | { | ||
1826 | /* fixed_bit_masks */ | ||
1827 | 0x800000007ffff000ULL, | ||
1828 | 0ULL, | ||
1829 | 0ULL, | ||
1830 | 0ULL, | ||
1831 | 0ULL | ||
1832 | }, | ||
1833 | { | ||
1834 | /* fixed_bit_values */ | ||
1835 | 0x0000000078161000ULL, | ||
1836 | -1ULL, | ||
1837 | -1ULL, | ||
1838 | -1ULL, | ||
1839 | -1ULL | ||
1840 | } | ||
1841 | }, | ||
1842 | { "blez", TILE_OPC_BLEZ, 0x2 /* pipes */, 2 /* num_operands */, | ||
1843 | TREG_ZERO, /* implicitly_written_register */ | ||
1844 | 1, /* can_bundle */ | ||
1845 | { | ||
1846 | /* operands */ | ||
1847 | { 0, }, | ||
1848 | { 10, 20 }, | ||
1849 | { 0, }, | ||
1850 | { 0, }, | ||
1851 | { 0, } | ||
1852 | }, | ||
1853 | { | ||
1854 | /* fixed_bit_masks */ | ||
1855 | 0ULL, | ||
1856 | 0xfc00000780000000ULL, | ||
1857 | 0ULL, | ||
1858 | 0ULL, | ||
1859 | 0ULL | ||
1860 | }, | ||
1861 | { | ||
1862 | /* fixed_bit_values */ | ||
1863 | -1ULL, | ||
1864 | 0x2800000500000000ULL, | ||
1865 | -1ULL, | ||
1866 | -1ULL, | ||
1867 | -1ULL | ||
1868 | } | ||
1869 | }, | ||
1870 | { "blez.sn", TILE_OPC_BLEZ_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1871 | TREG_SN, /* implicitly_written_register */ | ||
1872 | 1, /* can_bundle */ | ||
1873 | { | ||
1874 | /* operands */ | ||
1875 | { 0, }, | ||
1876 | { 10, 20 }, | ||
1877 | { 0, }, | ||
1878 | { 0, }, | ||
1879 | { 0, } | ||
1880 | }, | ||
1881 | { | ||
1882 | /* fixed_bit_masks */ | ||
1883 | 0ULL, | ||
1884 | 0xfc00000780000000ULL, | ||
1885 | 0ULL, | ||
1886 | 0ULL, | ||
1887 | 0ULL | ||
1888 | }, | ||
1889 | { | ||
1890 | /* fixed_bit_values */ | ||
1891 | -1ULL, | ||
1892 | 0x2c00000500000000ULL, | ||
1893 | -1ULL, | ||
1894 | -1ULL, | ||
1895 | -1ULL | ||
1896 | } | ||
1897 | }, | ||
1898 | { "blezt", TILE_OPC_BLEZT, 0x2 /* pipes */, 2 /* num_operands */, | ||
1899 | TREG_ZERO, /* implicitly_written_register */ | ||
1900 | 1, /* can_bundle */ | ||
1901 | { | ||
1902 | /* operands */ | ||
1903 | { 0, }, | ||
1904 | { 10, 20 }, | ||
1905 | { 0, }, | ||
1906 | { 0, }, | ||
1907 | { 0, } | ||
1908 | }, | ||
1909 | { | ||
1910 | /* fixed_bit_masks */ | ||
1911 | 0ULL, | ||
1912 | 0xfc00000780000000ULL, | ||
1913 | 0ULL, | ||
1914 | 0ULL, | ||
1915 | 0ULL | ||
1916 | }, | ||
1917 | { | ||
1918 | /* fixed_bit_values */ | ||
1919 | -1ULL, | ||
1920 | 0x2800000580000000ULL, | ||
1921 | -1ULL, | ||
1922 | -1ULL, | ||
1923 | -1ULL | ||
1924 | } | ||
1925 | }, | ||
1926 | { "blezt.sn", TILE_OPC_BLEZT_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1927 | TREG_SN, /* implicitly_written_register */ | ||
1928 | 1, /* can_bundle */ | ||
1929 | { | ||
1930 | /* operands */ | ||
1931 | { 0, }, | ||
1932 | { 10, 20 }, | ||
1933 | { 0, }, | ||
1934 | { 0, }, | ||
1935 | { 0, } | ||
1936 | }, | ||
1937 | { | ||
1938 | /* fixed_bit_masks */ | ||
1939 | 0ULL, | ||
1940 | 0xfc00000780000000ULL, | ||
1941 | 0ULL, | ||
1942 | 0ULL, | ||
1943 | 0ULL | ||
1944 | }, | ||
1945 | { | ||
1946 | /* fixed_bit_values */ | ||
1947 | -1ULL, | ||
1948 | 0x2c00000580000000ULL, | ||
1949 | -1ULL, | ||
1950 | -1ULL, | ||
1951 | -1ULL | ||
1952 | } | ||
1953 | }, | ||
1954 | { "blz", TILE_OPC_BLZ, 0x2 /* pipes */, 2 /* num_operands */, | ||
1955 | TREG_ZERO, /* implicitly_written_register */ | ||
1956 | 1, /* can_bundle */ | ||
1957 | { | ||
1958 | /* operands */ | ||
1959 | { 0, }, | ||
1960 | { 10, 20 }, | ||
1961 | { 0, }, | ||
1962 | { 0, }, | ||
1963 | { 0, } | ||
1964 | }, | ||
1965 | { | ||
1966 | /* fixed_bit_masks */ | ||
1967 | 0ULL, | ||
1968 | 0xfc00000780000000ULL, | ||
1969 | 0ULL, | ||
1970 | 0ULL, | ||
1971 | 0ULL | ||
1972 | }, | ||
1973 | { | ||
1974 | /* fixed_bit_values */ | ||
1975 | -1ULL, | ||
1976 | 0x2800000400000000ULL, | ||
1977 | -1ULL, | ||
1978 | -1ULL, | ||
1979 | -1ULL | ||
1980 | } | ||
1981 | }, | ||
1982 | { "blz.sn", TILE_OPC_BLZ_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
1983 | TREG_SN, /* implicitly_written_register */ | ||
1984 | 1, /* can_bundle */ | ||
1985 | { | ||
1986 | /* operands */ | ||
1987 | { 0, }, | ||
1988 | { 10, 20 }, | ||
1989 | { 0, }, | ||
1990 | { 0, }, | ||
1991 | { 0, } | ||
1992 | }, | ||
1993 | { | ||
1994 | /* fixed_bit_masks */ | ||
1995 | 0ULL, | ||
1996 | 0xfc00000780000000ULL, | ||
1997 | 0ULL, | ||
1998 | 0ULL, | ||
1999 | 0ULL | ||
2000 | }, | ||
2001 | { | ||
2002 | /* fixed_bit_values */ | ||
2003 | -1ULL, | ||
2004 | 0x2c00000400000000ULL, | ||
2005 | -1ULL, | ||
2006 | -1ULL, | ||
2007 | -1ULL | ||
2008 | } | ||
2009 | }, | ||
2010 | { "blzt", TILE_OPC_BLZT, 0x2 /* pipes */, 2 /* num_operands */, | ||
2011 | TREG_ZERO, /* implicitly_written_register */ | ||
2012 | 1, /* can_bundle */ | ||
2013 | { | ||
2014 | /* operands */ | ||
2015 | { 0, }, | ||
2016 | { 10, 20 }, | ||
2017 | { 0, }, | ||
2018 | { 0, }, | ||
2019 | { 0, } | ||
2020 | }, | ||
2021 | { | ||
2022 | /* fixed_bit_masks */ | ||
2023 | 0ULL, | ||
2024 | 0xfc00000780000000ULL, | ||
2025 | 0ULL, | ||
2026 | 0ULL, | ||
2027 | 0ULL | ||
2028 | }, | ||
2029 | { | ||
2030 | /* fixed_bit_values */ | ||
2031 | -1ULL, | ||
2032 | 0x2800000480000000ULL, | ||
2033 | -1ULL, | ||
2034 | -1ULL, | ||
2035 | -1ULL | ||
2036 | } | ||
2037 | }, | ||
2038 | { "blzt.sn", TILE_OPC_BLZT_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
2039 | TREG_SN, /* implicitly_written_register */ | ||
2040 | 1, /* can_bundle */ | ||
2041 | { | ||
2042 | /* operands */ | ||
2043 | { 0, }, | ||
2044 | { 10, 20 }, | ||
2045 | { 0, }, | ||
2046 | { 0, }, | ||
2047 | { 0, } | ||
2048 | }, | ||
2049 | { | ||
2050 | /* fixed_bit_masks */ | ||
2051 | 0ULL, | ||
2052 | 0xfc00000780000000ULL, | ||
2053 | 0ULL, | ||
2054 | 0ULL, | ||
2055 | 0ULL | ||
2056 | }, | ||
2057 | { | ||
2058 | /* fixed_bit_values */ | ||
2059 | -1ULL, | ||
2060 | 0x2c00000480000000ULL, | ||
2061 | -1ULL, | ||
2062 | -1ULL, | ||
2063 | -1ULL | ||
2064 | } | ||
2065 | }, | ||
2066 | { "bnz", TILE_OPC_BNZ, 0x2 /* pipes */, 2 /* num_operands */, | ||
2067 | TREG_ZERO, /* implicitly_written_register */ | ||
2068 | 1, /* can_bundle */ | ||
2069 | { | ||
2070 | /* operands */ | ||
2071 | { 0, }, | ||
2072 | { 10, 20 }, | ||
2073 | { 0, }, | ||
2074 | { 0, }, | ||
2075 | { 0, } | ||
2076 | }, | ||
2077 | { | ||
2078 | /* fixed_bit_masks */ | ||
2079 | 0ULL, | ||
2080 | 0xfc00000780000000ULL, | ||
2081 | 0ULL, | ||
2082 | 0ULL, | ||
2083 | 0ULL | ||
2084 | }, | ||
2085 | { | ||
2086 | /* fixed_bit_values */ | ||
2087 | -1ULL, | ||
2088 | 0x2800000100000000ULL, | ||
2089 | -1ULL, | ||
2090 | -1ULL, | ||
2091 | -1ULL | ||
2092 | } | ||
2093 | }, | ||
2094 | { "bnz.sn", TILE_OPC_BNZ_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
2095 | TREG_SN, /* implicitly_written_register */ | ||
2096 | 1, /* can_bundle */ | ||
2097 | { | ||
2098 | /* operands */ | ||
2099 | { 0, }, | ||
2100 | { 10, 20 }, | ||
2101 | { 0, }, | ||
2102 | { 0, }, | ||
2103 | { 0, } | ||
2104 | }, | ||
2105 | { | ||
2106 | /* fixed_bit_masks */ | ||
2107 | 0ULL, | ||
2108 | 0xfc00000780000000ULL, | ||
2109 | 0ULL, | ||
2110 | 0ULL, | ||
2111 | 0ULL | ||
2112 | }, | ||
2113 | { | ||
2114 | /* fixed_bit_values */ | ||
2115 | -1ULL, | ||
2116 | 0x2c00000100000000ULL, | ||
2117 | -1ULL, | ||
2118 | -1ULL, | ||
2119 | -1ULL | ||
2120 | } | ||
2121 | }, | ||
2122 | { "bnzt", TILE_OPC_BNZT, 0x2 /* pipes */, 2 /* num_operands */, | ||
2123 | TREG_ZERO, /* implicitly_written_register */ | ||
2124 | 1, /* can_bundle */ | ||
2125 | { | ||
2126 | /* operands */ | ||
2127 | { 0, }, | ||
2128 | { 10, 20 }, | ||
2129 | { 0, }, | ||
2130 | { 0, }, | ||
2131 | { 0, } | ||
2132 | }, | ||
2133 | { | ||
2134 | /* fixed_bit_masks */ | ||
2135 | 0ULL, | ||
2136 | 0xfc00000780000000ULL, | ||
2137 | 0ULL, | ||
2138 | 0ULL, | ||
2139 | 0ULL | ||
2140 | }, | ||
2141 | { | ||
2142 | /* fixed_bit_values */ | ||
2143 | -1ULL, | ||
2144 | 0x2800000180000000ULL, | ||
2145 | -1ULL, | ||
2146 | -1ULL, | ||
2147 | -1ULL | ||
2148 | } | ||
2149 | }, | ||
2150 | { "bnzt.sn", TILE_OPC_BNZT_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
2151 | TREG_SN, /* implicitly_written_register */ | ||
2152 | 1, /* can_bundle */ | ||
2153 | { | ||
2154 | /* operands */ | ||
2155 | { 0, }, | ||
2156 | { 10, 20 }, | ||
2157 | { 0, }, | ||
2158 | { 0, }, | ||
2159 | { 0, } | ||
2160 | }, | ||
2161 | { | ||
2162 | /* fixed_bit_masks */ | ||
2163 | 0ULL, | ||
2164 | 0xfc00000780000000ULL, | ||
2165 | 0ULL, | ||
2166 | 0ULL, | ||
2167 | 0ULL | ||
2168 | }, | ||
2169 | { | ||
2170 | /* fixed_bit_values */ | ||
2171 | -1ULL, | ||
2172 | 0x2c00000180000000ULL, | ||
2173 | -1ULL, | ||
2174 | -1ULL, | ||
2175 | -1ULL | ||
2176 | } | ||
2177 | }, | ||
2178 | { "bytex", TILE_OPC_BYTEX, 0x5 /* pipes */, 2 /* num_operands */, | ||
2179 | TREG_ZERO, /* implicitly_written_register */ | ||
2180 | 1, /* can_bundle */ | ||
2181 | { | ||
2182 | /* operands */ | ||
2183 | { 7, 8 }, | ||
2184 | { 0, }, | ||
2185 | { 11, 12 }, | ||
2186 | { 0, }, | ||
2187 | { 0, } | ||
2188 | }, | ||
2189 | { | ||
2190 | /* fixed_bit_masks */ | ||
2191 | 0x800000007ffff000ULL, | ||
2192 | 0ULL, | ||
2193 | 0x80000000780ff000ULL, | ||
2194 | 0ULL, | ||
2195 | 0ULL | ||
2196 | }, | ||
2197 | { | ||
2198 | /* fixed_bit_values */ | ||
2199 | 0x0000000070162000ULL, | ||
2200 | -1ULL, | ||
2201 | 0x80000000680a2000ULL, | ||
2202 | -1ULL, | ||
2203 | -1ULL | ||
2204 | } | ||
2205 | }, | ||
2206 | { "bytex.sn", TILE_OPC_BYTEX_SN, 0x1 /* pipes */, 2 /* num_operands */, | ||
2207 | TREG_SN, /* implicitly_written_register */ | ||
2208 | 1, /* can_bundle */ | ||
2209 | { | ||
2210 | /* operands */ | ||
2211 | { 7, 8 }, | ||
2212 | { 0, }, | ||
2213 | { 0, }, | ||
2214 | { 0, }, | ||
2215 | { 0, } | ||
2216 | }, | ||
2217 | { | ||
2218 | /* fixed_bit_masks */ | ||
2219 | 0x800000007ffff000ULL, | ||
2220 | 0ULL, | ||
2221 | 0ULL, | ||
2222 | 0ULL, | ||
2223 | 0ULL | ||
2224 | }, | ||
2225 | { | ||
2226 | /* fixed_bit_values */ | ||
2227 | 0x0000000078162000ULL, | ||
2228 | -1ULL, | ||
2229 | -1ULL, | ||
2230 | -1ULL, | ||
2231 | -1ULL | ||
2232 | } | ||
2233 | }, | ||
2234 | { "bz", TILE_OPC_BZ, 0x2 /* pipes */, 2 /* num_operands */, | ||
2235 | TREG_ZERO, /* implicitly_written_register */ | ||
2236 | 1, /* can_bundle */ | ||
2237 | { | ||
2238 | /* operands */ | ||
2239 | { 0, }, | ||
2240 | { 10, 20 }, | ||
2241 | { 0, }, | ||
2242 | { 0, }, | ||
2243 | { 0, } | ||
2244 | }, | ||
2245 | { | ||
2246 | /* fixed_bit_masks */ | ||
2247 | 0ULL, | ||
2248 | 0xfc00000780000000ULL, | ||
2249 | 0ULL, | ||
2250 | 0ULL, | ||
2251 | 0ULL | ||
2252 | }, | ||
2253 | { | ||
2254 | /* fixed_bit_values */ | ||
2255 | -1ULL, | ||
2256 | 0x2800000000000000ULL, | ||
2257 | -1ULL, | ||
2258 | -1ULL, | ||
2259 | -1ULL | ||
2260 | } | ||
2261 | }, | ||
2262 | { "bz.sn", TILE_OPC_BZ_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
2263 | TREG_SN, /* implicitly_written_register */ | ||
2264 | 1, /* can_bundle */ | ||
2265 | { | ||
2266 | /* operands */ | ||
2267 | { 0, }, | ||
2268 | { 10, 20 }, | ||
2269 | { 0, }, | ||
2270 | { 0, }, | ||
2271 | { 0, } | ||
2272 | }, | ||
2273 | { | ||
2274 | /* fixed_bit_masks */ | ||
2275 | 0ULL, | ||
2276 | 0xfc00000780000000ULL, | ||
2277 | 0ULL, | ||
2278 | 0ULL, | ||
2279 | 0ULL | ||
2280 | }, | ||
2281 | { | ||
2282 | /* fixed_bit_values */ | ||
2283 | -1ULL, | ||
2284 | 0x2c00000000000000ULL, | ||
2285 | -1ULL, | ||
2286 | -1ULL, | ||
2287 | -1ULL | ||
2288 | } | ||
2289 | }, | ||
2290 | { "bzt", TILE_OPC_BZT, 0x2 /* pipes */, 2 /* num_operands */, | ||
2291 | TREG_ZERO, /* implicitly_written_register */ | ||
2292 | 1, /* can_bundle */ | ||
2293 | { | ||
2294 | /* operands */ | ||
2295 | { 0, }, | ||
2296 | { 10, 20 }, | ||
2297 | { 0, }, | ||
2298 | { 0, }, | ||
2299 | { 0, } | ||
2300 | }, | ||
2301 | { | ||
2302 | /* fixed_bit_masks */ | ||
2303 | 0ULL, | ||
2304 | 0xfc00000780000000ULL, | ||
2305 | 0ULL, | ||
2306 | 0ULL, | ||
2307 | 0ULL | ||
2308 | }, | ||
2309 | { | ||
2310 | /* fixed_bit_values */ | ||
2311 | -1ULL, | ||
2312 | 0x2800000080000000ULL, | ||
2313 | -1ULL, | ||
2314 | -1ULL, | ||
2315 | -1ULL | ||
2316 | } | ||
2317 | }, | ||
2318 | { "bzt.sn", TILE_OPC_BZT_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
2319 | TREG_SN, /* implicitly_written_register */ | ||
2320 | 1, /* can_bundle */ | ||
2321 | { | ||
2322 | /* operands */ | ||
2323 | { 0, }, | ||
2324 | { 10, 20 }, | ||
2325 | { 0, }, | ||
2326 | { 0, }, | ||
2327 | { 0, } | ||
2328 | }, | ||
2329 | { | ||
2330 | /* fixed_bit_masks */ | ||
2331 | 0ULL, | ||
2332 | 0xfc00000780000000ULL, | ||
2333 | 0ULL, | ||
2334 | 0ULL, | ||
2335 | 0ULL | ||
2336 | }, | ||
2337 | { | ||
2338 | /* fixed_bit_values */ | ||
2339 | -1ULL, | ||
2340 | 0x2c00000080000000ULL, | ||
2341 | -1ULL, | ||
2342 | -1ULL, | ||
2343 | -1ULL | ||
2344 | } | ||
2345 | }, | ||
2346 | { "clz", TILE_OPC_CLZ, 0x5 /* pipes */, 2 /* num_operands */, | ||
2347 | TREG_ZERO, /* implicitly_written_register */ | ||
2348 | 1, /* can_bundle */ | ||
2349 | { | ||
2350 | /* operands */ | ||
2351 | { 7, 8 }, | ||
2352 | { 0, }, | ||
2353 | { 11, 12 }, | ||
2354 | { 0, }, | ||
2355 | { 0, } | ||
2356 | }, | ||
2357 | { | ||
2358 | /* fixed_bit_masks */ | ||
2359 | 0x800000007ffff000ULL, | ||
2360 | 0ULL, | ||
2361 | 0x80000000780ff000ULL, | ||
2362 | 0ULL, | ||
2363 | 0ULL | ||
2364 | }, | ||
2365 | { | ||
2366 | /* fixed_bit_values */ | ||
2367 | 0x0000000070163000ULL, | ||
2368 | -1ULL, | ||
2369 | 0x80000000680a3000ULL, | ||
2370 | -1ULL, | ||
2371 | -1ULL | ||
2372 | } | ||
2373 | }, | ||
2374 | { "clz.sn", TILE_OPC_CLZ_SN, 0x1 /* pipes */, 2 /* num_operands */, | ||
2375 | TREG_SN, /* implicitly_written_register */ | ||
2376 | 1, /* can_bundle */ | ||
2377 | { | ||
2378 | /* operands */ | ||
2379 | { 7, 8 }, | ||
2380 | { 0, }, | ||
2381 | { 0, }, | ||
2382 | { 0, }, | ||
2383 | { 0, } | ||
2384 | }, | ||
2385 | { | ||
2386 | /* fixed_bit_masks */ | ||
2387 | 0x800000007ffff000ULL, | ||
2388 | 0ULL, | ||
2389 | 0ULL, | ||
2390 | 0ULL, | ||
2391 | 0ULL | ||
2392 | }, | ||
2393 | { | ||
2394 | /* fixed_bit_values */ | ||
2395 | 0x0000000078163000ULL, | ||
2396 | -1ULL, | ||
2397 | -1ULL, | ||
2398 | -1ULL, | ||
2399 | -1ULL | ||
2400 | } | ||
2401 | }, | ||
2402 | { "crc32_32", TILE_OPC_CRC32_32, 0x1 /* pipes */, 3 /* num_operands */, | ||
2403 | TREG_ZERO, /* implicitly_written_register */ | ||
2404 | 1, /* can_bundle */ | ||
2405 | { | ||
2406 | /* operands */ | ||
2407 | { 7, 8, 16 }, | ||
2408 | { 0, }, | ||
2409 | { 0, }, | ||
2410 | { 0, }, | ||
2411 | { 0, } | ||
2412 | }, | ||
2413 | { | ||
2414 | /* fixed_bit_masks */ | ||
2415 | 0x800000007ffc0000ULL, | ||
2416 | 0ULL, | ||
2417 | 0ULL, | ||
2418 | 0ULL, | ||
2419 | 0ULL | ||
2420 | }, | ||
2421 | { | ||
2422 | /* fixed_bit_values */ | ||
2423 | 0x0000000000240000ULL, | ||
2424 | -1ULL, | ||
2425 | -1ULL, | ||
2426 | -1ULL, | ||
2427 | -1ULL | ||
2428 | } | ||
2429 | }, | ||
2430 | { "crc32_32.sn", TILE_OPC_CRC32_32_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
2431 | TREG_SN, /* implicitly_written_register */ | ||
2432 | 1, /* can_bundle */ | ||
2433 | { | ||
2434 | /* operands */ | ||
2435 | { 7, 8, 16 }, | ||
2436 | { 0, }, | ||
2437 | { 0, }, | ||
2438 | { 0, }, | ||
2439 | { 0, } | ||
2440 | }, | ||
2441 | { | ||
2442 | /* fixed_bit_masks */ | ||
2443 | 0x800000007ffc0000ULL, | ||
2444 | 0ULL, | ||
2445 | 0ULL, | ||
2446 | 0ULL, | ||
2447 | 0ULL | ||
2448 | }, | ||
2449 | { | ||
2450 | /* fixed_bit_values */ | ||
2451 | 0x0000000008240000ULL, | ||
2452 | -1ULL, | ||
2453 | -1ULL, | ||
2454 | -1ULL, | ||
2455 | -1ULL | ||
2456 | } | ||
2457 | }, | ||
2458 | { "crc32_8", TILE_OPC_CRC32_8, 0x1 /* pipes */, 3 /* num_operands */, | ||
2459 | TREG_ZERO, /* implicitly_written_register */ | ||
2460 | 1, /* can_bundle */ | ||
2461 | { | ||
2462 | /* operands */ | ||
2463 | { 7, 8, 16 }, | ||
2464 | { 0, }, | ||
2465 | { 0, }, | ||
2466 | { 0, }, | ||
2467 | { 0, } | ||
2468 | }, | ||
2469 | { | ||
2470 | /* fixed_bit_masks */ | ||
2471 | 0x800000007ffc0000ULL, | ||
2472 | 0ULL, | ||
2473 | 0ULL, | ||
2474 | 0ULL, | ||
2475 | 0ULL | ||
2476 | }, | ||
2477 | { | ||
2478 | /* fixed_bit_values */ | ||
2479 | 0x0000000000280000ULL, | ||
2480 | -1ULL, | ||
2481 | -1ULL, | ||
2482 | -1ULL, | ||
2483 | -1ULL | ||
2484 | } | ||
2485 | }, | ||
2486 | { "crc32_8.sn", TILE_OPC_CRC32_8_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
2487 | TREG_SN, /* implicitly_written_register */ | ||
2488 | 1, /* can_bundle */ | ||
2489 | { | ||
2490 | /* operands */ | ||
2491 | { 7, 8, 16 }, | ||
2492 | { 0, }, | ||
2493 | { 0, }, | ||
2494 | { 0, }, | ||
2495 | { 0, } | ||
2496 | }, | ||
2497 | { | ||
2498 | /* fixed_bit_masks */ | ||
2499 | 0x800000007ffc0000ULL, | ||
2500 | 0ULL, | ||
2501 | 0ULL, | ||
2502 | 0ULL, | ||
2503 | 0ULL | ||
2504 | }, | ||
2505 | { | ||
2506 | /* fixed_bit_values */ | ||
2507 | 0x0000000008280000ULL, | ||
2508 | -1ULL, | ||
2509 | -1ULL, | ||
2510 | -1ULL, | ||
2511 | -1ULL | ||
2512 | } | ||
2513 | }, | ||
2514 | { "ctz", TILE_OPC_CTZ, 0x5 /* pipes */, 2 /* num_operands */, | ||
2515 | TREG_ZERO, /* implicitly_written_register */ | ||
2516 | 1, /* can_bundle */ | ||
2517 | { | ||
2518 | /* operands */ | ||
2519 | { 7, 8 }, | ||
2520 | { 0, }, | ||
2521 | { 11, 12 }, | ||
2522 | { 0, }, | ||
2523 | { 0, } | ||
2524 | }, | ||
2525 | { | ||
2526 | /* fixed_bit_masks */ | ||
2527 | 0x800000007ffff000ULL, | ||
2528 | 0ULL, | ||
2529 | 0x80000000780ff000ULL, | ||
2530 | 0ULL, | ||
2531 | 0ULL | ||
2532 | }, | ||
2533 | { | ||
2534 | /* fixed_bit_values */ | ||
2535 | 0x0000000070164000ULL, | ||
2536 | -1ULL, | ||
2537 | 0x80000000680a4000ULL, | ||
2538 | -1ULL, | ||
2539 | -1ULL | ||
2540 | } | ||
2541 | }, | ||
2542 | { "ctz.sn", TILE_OPC_CTZ_SN, 0x1 /* pipes */, 2 /* num_operands */, | ||
2543 | TREG_SN, /* implicitly_written_register */ | ||
2544 | 1, /* can_bundle */ | ||
2545 | { | ||
2546 | /* operands */ | ||
2547 | { 7, 8 }, | ||
2548 | { 0, }, | ||
2549 | { 0, }, | ||
2550 | { 0, }, | ||
2551 | { 0, } | ||
2552 | }, | ||
2553 | { | ||
2554 | /* fixed_bit_masks */ | ||
2555 | 0x800000007ffff000ULL, | ||
2556 | 0ULL, | ||
2557 | 0ULL, | ||
2558 | 0ULL, | ||
2559 | 0ULL | ||
2560 | }, | ||
2561 | { | ||
2562 | /* fixed_bit_values */ | ||
2563 | 0x0000000078164000ULL, | ||
2564 | -1ULL, | ||
2565 | -1ULL, | ||
2566 | -1ULL, | ||
2567 | -1ULL | ||
2568 | } | ||
2569 | }, | ||
2570 | { "drain", TILE_OPC_DRAIN, 0x2 /* pipes */, 0 /* num_operands */, | ||
2571 | TREG_ZERO, /* implicitly_written_register */ | ||
2572 | 0, /* can_bundle */ | ||
2573 | { | ||
2574 | /* operands */ | ||
2575 | { 0, }, | ||
2576 | { }, | ||
2577 | { 0, }, | ||
2578 | { 0, }, | ||
2579 | { 0, } | ||
2580 | }, | ||
2581 | { | ||
2582 | /* fixed_bit_masks */ | ||
2583 | 0ULL, | ||
2584 | 0xfbfff80000000000ULL, | ||
2585 | 0ULL, | ||
2586 | 0ULL, | ||
2587 | 0ULL | ||
2588 | }, | ||
2589 | { | ||
2590 | /* fixed_bit_values */ | ||
2591 | -1ULL, | ||
2592 | 0x400b080000000000ULL, | ||
2593 | -1ULL, | ||
2594 | -1ULL, | ||
2595 | -1ULL | ||
2596 | } | ||
2597 | }, | ||
2598 | { "dtlbpr", TILE_OPC_DTLBPR, 0x2 /* pipes */, 1 /* num_operands */, | ||
2599 | TREG_ZERO, /* implicitly_written_register */ | ||
2600 | 1, /* can_bundle */ | ||
2601 | { | ||
2602 | /* operands */ | ||
2603 | { 0, }, | ||
2604 | { 10 }, | ||
2605 | { 0, }, | ||
2606 | { 0, }, | ||
2607 | { 0, } | ||
2608 | }, | ||
2609 | { | ||
2610 | /* fixed_bit_masks */ | ||
2611 | 0ULL, | ||
2612 | 0xfbfff80000000000ULL, | ||
2613 | 0ULL, | ||
2614 | 0ULL, | ||
2615 | 0ULL | ||
2616 | }, | ||
2617 | { | ||
2618 | /* fixed_bit_values */ | ||
2619 | -1ULL, | ||
2620 | 0x400b100000000000ULL, | ||
2621 | -1ULL, | ||
2622 | -1ULL, | ||
2623 | -1ULL | ||
2624 | } | ||
2625 | }, | ||
2626 | { "dword_align", TILE_OPC_DWORD_ALIGN, 0x1 /* pipes */, 3 /* num_operands */, | ||
2627 | TREG_ZERO, /* implicitly_written_register */ | ||
2628 | 1, /* can_bundle */ | ||
2629 | { | ||
2630 | /* operands */ | ||
2631 | { 21, 8, 16 }, | ||
2632 | { 0, }, | ||
2633 | { 0, }, | ||
2634 | { 0, }, | ||
2635 | { 0, } | ||
2636 | }, | ||
2637 | { | ||
2638 | /* fixed_bit_masks */ | ||
2639 | 0x800000007ffc0000ULL, | ||
2640 | 0ULL, | ||
2641 | 0ULL, | ||
2642 | 0ULL, | ||
2643 | 0ULL | ||
2644 | }, | ||
2645 | { | ||
2646 | /* fixed_bit_values */ | ||
2647 | 0x00000000017c0000ULL, | ||
2648 | -1ULL, | ||
2649 | -1ULL, | ||
2650 | -1ULL, | ||
2651 | -1ULL | ||
2652 | } | ||
2653 | }, | ||
2654 | { "dword_align.sn", TILE_OPC_DWORD_ALIGN_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
2655 | TREG_SN, /* implicitly_written_register */ | ||
2656 | 1, /* can_bundle */ | ||
2657 | { | ||
2658 | /* operands */ | ||
2659 | { 21, 8, 16 }, | ||
2660 | { 0, }, | ||
2661 | { 0, }, | ||
2662 | { 0, }, | ||
2663 | { 0, } | ||
2664 | }, | ||
2665 | { | ||
2666 | /* fixed_bit_masks */ | ||
2667 | 0x800000007ffc0000ULL, | ||
2668 | 0ULL, | ||
2669 | 0ULL, | ||
2670 | 0ULL, | ||
2671 | 0ULL | ||
2672 | }, | ||
2673 | { | ||
2674 | /* fixed_bit_values */ | ||
2675 | 0x00000000097c0000ULL, | ||
2676 | -1ULL, | ||
2677 | -1ULL, | ||
2678 | -1ULL, | ||
2679 | -1ULL | ||
2680 | } | ||
2681 | }, | ||
2682 | { "finv", TILE_OPC_FINV, 0x2 /* pipes */, 1 /* num_operands */, | ||
2683 | TREG_ZERO, /* implicitly_written_register */ | ||
2684 | 1, /* can_bundle */ | ||
2685 | { | ||
2686 | /* operands */ | ||
2687 | { 0, }, | ||
2688 | { 10 }, | ||
2689 | { 0, }, | ||
2690 | { 0, }, | ||
2691 | { 0, } | ||
2692 | }, | ||
2693 | { | ||
2694 | /* fixed_bit_masks */ | ||
2695 | 0ULL, | ||
2696 | 0xfbfff80000000000ULL, | ||
2697 | 0ULL, | ||
2698 | 0ULL, | ||
2699 | 0ULL | ||
2700 | }, | ||
2701 | { | ||
2702 | /* fixed_bit_values */ | ||
2703 | -1ULL, | ||
2704 | 0x400b180000000000ULL, | ||
2705 | -1ULL, | ||
2706 | -1ULL, | ||
2707 | -1ULL | ||
2708 | } | ||
2709 | }, | ||
2710 | { "flush", TILE_OPC_FLUSH, 0x2 /* pipes */, 1 /* num_operands */, | ||
2711 | TREG_ZERO, /* implicitly_written_register */ | ||
2712 | 1, /* can_bundle */ | ||
2713 | { | ||
2714 | /* operands */ | ||
2715 | { 0, }, | ||
2716 | { 10 }, | ||
2717 | { 0, }, | ||
2718 | { 0, }, | ||
2719 | { 0, } | ||
2720 | }, | ||
2721 | { | ||
2722 | /* fixed_bit_masks */ | ||
2723 | 0ULL, | ||
2724 | 0xfbfff80000000000ULL, | ||
2725 | 0ULL, | ||
2726 | 0ULL, | ||
2727 | 0ULL | ||
2728 | }, | ||
2729 | { | ||
2730 | /* fixed_bit_values */ | ||
2731 | -1ULL, | ||
2732 | 0x400b200000000000ULL, | ||
2733 | -1ULL, | ||
2734 | -1ULL, | ||
2735 | -1ULL | ||
2736 | } | ||
2737 | }, | ||
2738 | { "fnop", TILE_OPC_FNOP, 0xf /* pipes */, 0 /* num_operands */, | ||
2739 | TREG_ZERO, /* implicitly_written_register */ | ||
2740 | 1, /* can_bundle */ | ||
2741 | { | ||
2742 | /* operands */ | ||
2743 | { }, | ||
2744 | { }, | ||
2745 | { }, | ||
2746 | { }, | ||
2747 | { 0, } | ||
2748 | }, | ||
2749 | { | ||
2750 | /* fixed_bit_masks */ | ||
2751 | 0x8000000077fff000ULL, | ||
2752 | 0xfbfff80000000000ULL, | ||
2753 | 0x80000000780ff000ULL, | ||
2754 | 0xf807f80000000000ULL, | ||
2755 | 0ULL | ||
2756 | }, | ||
2757 | { | ||
2758 | /* fixed_bit_values */ | ||
2759 | 0x0000000070165000ULL, | ||
2760 | 0x400b280000000000ULL, | ||
2761 | 0x80000000680a5000ULL, | ||
2762 | 0xd805080000000000ULL, | ||
2763 | -1ULL | ||
2764 | } | ||
2765 | }, | ||
2766 | { "icoh", TILE_OPC_ICOH, 0x2 /* pipes */, 1 /* num_operands */, | ||
2767 | TREG_ZERO, /* implicitly_written_register */ | ||
2768 | 1, /* can_bundle */ | ||
2769 | { | ||
2770 | /* operands */ | ||
2771 | { 0, }, | ||
2772 | { 10 }, | ||
2773 | { 0, }, | ||
2774 | { 0, }, | ||
2775 | { 0, } | ||
2776 | }, | ||
2777 | { | ||
2778 | /* fixed_bit_masks */ | ||
2779 | 0ULL, | ||
2780 | 0xfbfff80000000000ULL, | ||
2781 | 0ULL, | ||
2782 | 0ULL, | ||
2783 | 0ULL | ||
2784 | }, | ||
2785 | { | ||
2786 | /* fixed_bit_values */ | ||
2787 | -1ULL, | ||
2788 | 0x400b300000000000ULL, | ||
2789 | -1ULL, | ||
2790 | -1ULL, | ||
2791 | -1ULL | ||
2792 | } | ||
2793 | }, | ||
2794 | { "ill", TILE_OPC_ILL, 0xa /* pipes */, 0 /* num_operands */, | ||
2795 | TREG_ZERO, /* implicitly_written_register */ | ||
2796 | 1, /* can_bundle */ | ||
2797 | { | ||
2798 | /* operands */ | ||
2799 | { 0, }, | ||
2800 | { }, | ||
2801 | { 0, }, | ||
2802 | { }, | ||
2803 | { 0, } | ||
2804 | }, | ||
2805 | { | ||
2806 | /* fixed_bit_masks */ | ||
2807 | 0ULL, | ||
2808 | 0xfbfff80000000000ULL, | ||
2809 | 0ULL, | ||
2810 | 0xf807f80000000000ULL, | ||
2811 | 0ULL | ||
2812 | }, | ||
2813 | { | ||
2814 | /* fixed_bit_values */ | ||
2815 | -1ULL, | ||
2816 | 0x400b380000000000ULL, | ||
2817 | -1ULL, | ||
2818 | 0xd805100000000000ULL, | ||
2819 | -1ULL | ||
2820 | } | ||
2821 | }, | ||
2822 | { "inthb", TILE_OPC_INTHB, 0x3 /* pipes */, 3 /* num_operands */, | ||
2823 | TREG_ZERO, /* implicitly_written_register */ | ||
2824 | 1, /* can_bundle */ | ||
2825 | { | ||
2826 | /* operands */ | ||
2827 | { 7, 8, 16 }, | ||
2828 | { 9, 10, 17 }, | ||
2829 | { 0, }, | ||
2830 | { 0, }, | ||
2831 | { 0, } | ||
2832 | }, | ||
2833 | { | ||
2834 | /* fixed_bit_masks */ | ||
2835 | 0x800000007ffc0000ULL, | ||
2836 | 0xfffe000000000000ULL, | ||
2837 | 0ULL, | ||
2838 | 0ULL, | ||
2839 | 0ULL | ||
2840 | }, | ||
2841 | { | ||
2842 | /* fixed_bit_values */ | ||
2843 | 0x00000000002c0000ULL, | ||
2844 | 0x080a000000000000ULL, | ||
2845 | -1ULL, | ||
2846 | -1ULL, | ||
2847 | -1ULL | ||
2848 | } | ||
2849 | }, | ||
2850 | { "inthb.sn", TILE_OPC_INTHB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
2851 | TREG_SN, /* implicitly_written_register */ | ||
2852 | 1, /* can_bundle */ | ||
2853 | { | ||
2854 | /* operands */ | ||
2855 | { 7, 8, 16 }, | ||
2856 | { 9, 10, 17 }, | ||
2857 | { 0, }, | ||
2858 | { 0, }, | ||
2859 | { 0, } | ||
2860 | }, | ||
2861 | { | ||
2862 | /* fixed_bit_masks */ | ||
2863 | 0x800000007ffc0000ULL, | ||
2864 | 0xfffe000000000000ULL, | ||
2865 | 0ULL, | ||
2866 | 0ULL, | ||
2867 | 0ULL | ||
2868 | }, | ||
2869 | { | ||
2870 | /* fixed_bit_values */ | ||
2871 | 0x00000000082c0000ULL, | ||
2872 | 0x0c0a000000000000ULL, | ||
2873 | -1ULL, | ||
2874 | -1ULL, | ||
2875 | -1ULL | ||
2876 | } | ||
2877 | }, | ||
2878 | { "inthh", TILE_OPC_INTHH, 0x3 /* pipes */, 3 /* num_operands */, | ||
2879 | TREG_ZERO, /* implicitly_written_register */ | ||
2880 | 1, /* can_bundle */ | ||
2881 | { | ||
2882 | /* operands */ | ||
2883 | { 7, 8, 16 }, | ||
2884 | { 9, 10, 17 }, | ||
2885 | { 0, }, | ||
2886 | { 0, }, | ||
2887 | { 0, } | ||
2888 | }, | ||
2889 | { | ||
2890 | /* fixed_bit_masks */ | ||
2891 | 0x800000007ffc0000ULL, | ||
2892 | 0xfffe000000000000ULL, | ||
2893 | 0ULL, | ||
2894 | 0ULL, | ||
2895 | 0ULL | ||
2896 | }, | ||
2897 | { | ||
2898 | /* fixed_bit_values */ | ||
2899 | 0x0000000000300000ULL, | ||
2900 | 0x080c000000000000ULL, | ||
2901 | -1ULL, | ||
2902 | -1ULL, | ||
2903 | -1ULL | ||
2904 | } | ||
2905 | }, | ||
2906 | { "inthh.sn", TILE_OPC_INTHH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
2907 | TREG_SN, /* implicitly_written_register */ | ||
2908 | 1, /* can_bundle */ | ||
2909 | { | ||
2910 | /* operands */ | ||
2911 | { 7, 8, 16 }, | ||
2912 | { 9, 10, 17 }, | ||
2913 | { 0, }, | ||
2914 | { 0, }, | ||
2915 | { 0, } | ||
2916 | }, | ||
2917 | { | ||
2918 | /* fixed_bit_masks */ | ||
2919 | 0x800000007ffc0000ULL, | ||
2920 | 0xfffe000000000000ULL, | ||
2921 | 0ULL, | ||
2922 | 0ULL, | ||
2923 | 0ULL | ||
2924 | }, | ||
2925 | { | ||
2926 | /* fixed_bit_values */ | ||
2927 | 0x0000000008300000ULL, | ||
2928 | 0x0c0c000000000000ULL, | ||
2929 | -1ULL, | ||
2930 | -1ULL, | ||
2931 | -1ULL | ||
2932 | } | ||
2933 | }, | ||
2934 | { "intlb", TILE_OPC_INTLB, 0x3 /* pipes */, 3 /* num_operands */, | ||
2935 | TREG_ZERO, /* implicitly_written_register */ | ||
2936 | 1, /* can_bundle */ | ||
2937 | { | ||
2938 | /* operands */ | ||
2939 | { 7, 8, 16 }, | ||
2940 | { 9, 10, 17 }, | ||
2941 | { 0, }, | ||
2942 | { 0, }, | ||
2943 | { 0, } | ||
2944 | }, | ||
2945 | { | ||
2946 | /* fixed_bit_masks */ | ||
2947 | 0x800000007ffc0000ULL, | ||
2948 | 0xfffe000000000000ULL, | ||
2949 | 0ULL, | ||
2950 | 0ULL, | ||
2951 | 0ULL | ||
2952 | }, | ||
2953 | { | ||
2954 | /* fixed_bit_values */ | ||
2955 | 0x0000000000340000ULL, | ||
2956 | 0x080e000000000000ULL, | ||
2957 | -1ULL, | ||
2958 | -1ULL, | ||
2959 | -1ULL | ||
2960 | } | ||
2961 | }, | ||
2962 | { "intlb.sn", TILE_OPC_INTLB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
2963 | TREG_SN, /* implicitly_written_register */ | ||
2964 | 1, /* can_bundle */ | ||
2965 | { | ||
2966 | /* operands */ | ||
2967 | { 7, 8, 16 }, | ||
2968 | { 9, 10, 17 }, | ||
2969 | { 0, }, | ||
2970 | { 0, }, | ||
2971 | { 0, } | ||
2972 | }, | ||
2973 | { | ||
2974 | /* fixed_bit_masks */ | ||
2975 | 0x800000007ffc0000ULL, | ||
2976 | 0xfffe000000000000ULL, | ||
2977 | 0ULL, | ||
2978 | 0ULL, | ||
2979 | 0ULL | ||
2980 | }, | ||
2981 | { | ||
2982 | /* fixed_bit_values */ | ||
2983 | 0x0000000008340000ULL, | ||
2984 | 0x0c0e000000000000ULL, | ||
2985 | -1ULL, | ||
2986 | -1ULL, | ||
2987 | -1ULL | ||
2988 | } | ||
2989 | }, | ||
2990 | { "intlh", TILE_OPC_INTLH, 0x3 /* pipes */, 3 /* num_operands */, | ||
2991 | TREG_ZERO, /* implicitly_written_register */ | ||
2992 | 1, /* can_bundle */ | ||
2993 | { | ||
2994 | /* operands */ | ||
2995 | { 7, 8, 16 }, | ||
2996 | { 9, 10, 17 }, | ||
2997 | { 0, }, | ||
2998 | { 0, }, | ||
2999 | { 0, } | ||
3000 | }, | ||
3001 | { | ||
3002 | /* fixed_bit_masks */ | ||
3003 | 0x800000007ffc0000ULL, | ||
3004 | 0xfffe000000000000ULL, | ||
3005 | 0ULL, | ||
3006 | 0ULL, | ||
3007 | 0ULL | ||
3008 | }, | ||
3009 | { | ||
3010 | /* fixed_bit_values */ | ||
3011 | 0x0000000000380000ULL, | ||
3012 | 0x0810000000000000ULL, | ||
3013 | -1ULL, | ||
3014 | -1ULL, | ||
3015 | -1ULL | ||
3016 | } | ||
3017 | }, | ||
3018 | { "intlh.sn", TILE_OPC_INTLH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
3019 | TREG_SN, /* implicitly_written_register */ | ||
3020 | 1, /* can_bundle */ | ||
3021 | { | ||
3022 | /* operands */ | ||
3023 | { 7, 8, 16 }, | ||
3024 | { 9, 10, 17 }, | ||
3025 | { 0, }, | ||
3026 | { 0, }, | ||
3027 | { 0, } | ||
3028 | }, | ||
3029 | { | ||
3030 | /* fixed_bit_masks */ | ||
3031 | 0x800000007ffc0000ULL, | ||
3032 | 0xfffe000000000000ULL, | ||
3033 | 0ULL, | ||
3034 | 0ULL, | ||
3035 | 0ULL | ||
3036 | }, | ||
3037 | { | ||
3038 | /* fixed_bit_values */ | ||
3039 | 0x0000000008380000ULL, | ||
3040 | 0x0c10000000000000ULL, | ||
3041 | -1ULL, | ||
3042 | -1ULL, | ||
3043 | -1ULL | ||
3044 | } | ||
3045 | }, | ||
3046 | { "inv", TILE_OPC_INV, 0x2 /* pipes */, 1 /* num_operands */, | ||
3047 | TREG_ZERO, /* implicitly_written_register */ | ||
3048 | 1, /* can_bundle */ | ||
3049 | { | ||
3050 | /* operands */ | ||
3051 | { 0, }, | ||
3052 | { 10 }, | ||
3053 | { 0, }, | ||
3054 | { 0, }, | ||
3055 | { 0, } | ||
3056 | }, | ||
3057 | { | ||
3058 | /* fixed_bit_masks */ | ||
3059 | 0ULL, | ||
3060 | 0xfbfff80000000000ULL, | ||
3061 | 0ULL, | ||
3062 | 0ULL, | ||
3063 | 0ULL | ||
3064 | }, | ||
3065 | { | ||
3066 | /* fixed_bit_values */ | ||
3067 | -1ULL, | ||
3068 | 0x400b400000000000ULL, | ||
3069 | -1ULL, | ||
3070 | -1ULL, | ||
3071 | -1ULL | ||
3072 | } | ||
3073 | }, | ||
3074 | { "iret", TILE_OPC_IRET, 0x2 /* pipes */, 0 /* num_operands */, | ||
3075 | TREG_ZERO, /* implicitly_written_register */ | ||
3076 | 1, /* can_bundle */ | ||
3077 | { | ||
3078 | /* operands */ | ||
3079 | { 0, }, | ||
3080 | { }, | ||
3081 | { 0, }, | ||
3082 | { 0, }, | ||
3083 | { 0, } | ||
3084 | }, | ||
3085 | { | ||
3086 | /* fixed_bit_masks */ | ||
3087 | 0ULL, | ||
3088 | 0xfbfff80000000000ULL, | ||
3089 | 0ULL, | ||
3090 | 0ULL, | ||
3091 | 0ULL | ||
3092 | }, | ||
3093 | { | ||
3094 | /* fixed_bit_values */ | ||
3095 | -1ULL, | ||
3096 | 0x400b480000000000ULL, | ||
3097 | -1ULL, | ||
3098 | -1ULL, | ||
3099 | -1ULL | ||
3100 | } | ||
3101 | }, | ||
3102 | { "jalb", TILE_OPC_JALB, 0x2 /* pipes */, 1 /* num_operands */, | ||
3103 | TREG_LR, /* implicitly_written_register */ | ||
3104 | 1, /* can_bundle */ | ||
3105 | { | ||
3106 | /* operands */ | ||
3107 | { 0, }, | ||
3108 | { 22 }, | ||
3109 | { 0, }, | ||
3110 | { 0, }, | ||
3111 | { 0, } | ||
3112 | }, | ||
3113 | { | ||
3114 | /* fixed_bit_masks */ | ||
3115 | 0ULL, | ||
3116 | 0xf800000000000000ULL, | ||
3117 | 0ULL, | ||
3118 | 0ULL, | ||
3119 | 0ULL | ||
3120 | }, | ||
3121 | { | ||
3122 | /* fixed_bit_values */ | ||
3123 | -1ULL, | ||
3124 | 0x6800000000000000ULL, | ||
3125 | -1ULL, | ||
3126 | -1ULL, | ||
3127 | -1ULL | ||
3128 | } | ||
3129 | }, | ||
3130 | { "jalf", TILE_OPC_JALF, 0x2 /* pipes */, 1 /* num_operands */, | ||
3131 | TREG_LR, /* implicitly_written_register */ | ||
3132 | 1, /* can_bundle */ | ||
3133 | { | ||
3134 | /* operands */ | ||
3135 | { 0, }, | ||
3136 | { 22 }, | ||
3137 | { 0, }, | ||
3138 | { 0, }, | ||
3139 | { 0, } | ||
3140 | }, | ||
3141 | { | ||
3142 | /* fixed_bit_masks */ | ||
3143 | 0ULL, | ||
3144 | 0xf800000000000000ULL, | ||
3145 | 0ULL, | ||
3146 | 0ULL, | ||
3147 | 0ULL | ||
3148 | }, | ||
3149 | { | ||
3150 | /* fixed_bit_values */ | ||
3151 | -1ULL, | ||
3152 | 0x6000000000000000ULL, | ||
3153 | -1ULL, | ||
3154 | -1ULL, | ||
3155 | -1ULL | ||
3156 | } | ||
3157 | }, | ||
3158 | { "jalr", TILE_OPC_JALR, 0x2 /* pipes */, 1 /* num_operands */, | ||
3159 | TREG_LR, /* implicitly_written_register */ | ||
3160 | 1, /* can_bundle */ | ||
3161 | { | ||
3162 | /* operands */ | ||
3163 | { 0, }, | ||
3164 | { 10 }, | ||
3165 | { 0, }, | ||
3166 | { 0, }, | ||
3167 | { 0, } | ||
3168 | }, | ||
3169 | { | ||
3170 | /* fixed_bit_masks */ | ||
3171 | 0ULL, | ||
3172 | 0xfbfe000000000000ULL, | ||
3173 | 0ULL, | ||
3174 | 0ULL, | ||
3175 | 0ULL | ||
3176 | }, | ||
3177 | { | ||
3178 | /* fixed_bit_values */ | ||
3179 | -1ULL, | ||
3180 | 0x0814000000000000ULL, | ||
3181 | -1ULL, | ||
3182 | -1ULL, | ||
3183 | -1ULL | ||
3184 | } | ||
3185 | }, | ||
3186 | { "jalrp", TILE_OPC_JALRP, 0x2 /* pipes */, 1 /* num_operands */, | ||
3187 | TREG_LR, /* implicitly_written_register */ | ||
3188 | 1, /* can_bundle */ | ||
3189 | { | ||
3190 | /* operands */ | ||
3191 | { 0, }, | ||
3192 | { 10 }, | ||
3193 | { 0, }, | ||
3194 | { 0, }, | ||
3195 | { 0, } | ||
3196 | }, | ||
3197 | { | ||
3198 | /* fixed_bit_masks */ | ||
3199 | 0ULL, | ||
3200 | 0xfbfe000000000000ULL, | ||
3201 | 0ULL, | ||
3202 | 0ULL, | ||
3203 | 0ULL | ||
3204 | }, | ||
3205 | { | ||
3206 | /* fixed_bit_values */ | ||
3207 | -1ULL, | ||
3208 | 0x0812000000000000ULL, | ||
3209 | -1ULL, | ||
3210 | -1ULL, | ||
3211 | -1ULL | ||
3212 | } | ||
3213 | }, | ||
3214 | { "jb", TILE_OPC_JB, 0x2 /* pipes */, 1 /* num_operands */, | ||
3215 | TREG_ZERO, /* implicitly_written_register */ | ||
3216 | 1, /* can_bundle */ | ||
3217 | { | ||
3218 | /* operands */ | ||
3219 | { 0, }, | ||
3220 | { 22 }, | ||
3221 | { 0, }, | ||
3222 | { 0, }, | ||
3223 | { 0, } | ||
3224 | }, | ||
3225 | { | ||
3226 | /* fixed_bit_masks */ | ||
3227 | 0ULL, | ||
3228 | 0xf800000000000000ULL, | ||
3229 | 0ULL, | ||
3230 | 0ULL, | ||
3231 | 0ULL | ||
3232 | }, | ||
3233 | { | ||
3234 | /* fixed_bit_values */ | ||
3235 | -1ULL, | ||
3236 | 0x5800000000000000ULL, | ||
3237 | -1ULL, | ||
3238 | -1ULL, | ||
3239 | -1ULL | ||
3240 | } | ||
3241 | }, | ||
3242 | { "jf", TILE_OPC_JF, 0x2 /* pipes */, 1 /* num_operands */, | ||
3243 | TREG_ZERO, /* implicitly_written_register */ | ||
3244 | 1, /* can_bundle */ | ||
3245 | { | ||
3246 | /* operands */ | ||
3247 | { 0, }, | ||
3248 | { 22 }, | ||
3249 | { 0, }, | ||
3250 | { 0, }, | ||
3251 | { 0, } | ||
3252 | }, | ||
3253 | { | ||
3254 | /* fixed_bit_masks */ | ||
3255 | 0ULL, | ||
3256 | 0xf800000000000000ULL, | ||
3257 | 0ULL, | ||
3258 | 0ULL, | ||
3259 | 0ULL | ||
3260 | }, | ||
3261 | { | ||
3262 | /* fixed_bit_values */ | ||
3263 | -1ULL, | ||
3264 | 0x5000000000000000ULL, | ||
3265 | -1ULL, | ||
3266 | -1ULL, | ||
3267 | -1ULL | ||
3268 | } | ||
3269 | }, | ||
3270 | { "jr", TILE_OPC_JR, 0x2 /* pipes */, 1 /* num_operands */, | ||
3271 | TREG_ZERO, /* implicitly_written_register */ | ||
3272 | 1, /* can_bundle */ | ||
3273 | { | ||
3274 | /* operands */ | ||
3275 | { 0, }, | ||
3276 | { 10 }, | ||
3277 | { 0, }, | ||
3278 | { 0, }, | ||
3279 | { 0, } | ||
3280 | }, | ||
3281 | { | ||
3282 | /* fixed_bit_masks */ | ||
3283 | 0ULL, | ||
3284 | 0xfbfe000000000000ULL, | ||
3285 | 0ULL, | ||
3286 | 0ULL, | ||
3287 | 0ULL | ||
3288 | }, | ||
3289 | { | ||
3290 | /* fixed_bit_values */ | ||
3291 | -1ULL, | ||
3292 | 0x0818000000000000ULL, | ||
3293 | -1ULL, | ||
3294 | -1ULL, | ||
3295 | -1ULL | ||
3296 | } | ||
3297 | }, | ||
3298 | { "jrp", TILE_OPC_JRP, 0x2 /* pipes */, 1 /* num_operands */, | ||
3299 | TREG_ZERO, /* implicitly_written_register */ | ||
3300 | 1, /* can_bundle */ | ||
3301 | { | ||
3302 | /* operands */ | ||
3303 | { 0, }, | ||
3304 | { 10 }, | ||
3305 | { 0, }, | ||
3306 | { 0, }, | ||
3307 | { 0, } | ||
3308 | }, | ||
3309 | { | ||
3310 | /* fixed_bit_masks */ | ||
3311 | 0ULL, | ||
3312 | 0xfbfe000000000000ULL, | ||
3313 | 0ULL, | ||
3314 | 0ULL, | ||
3315 | 0ULL | ||
3316 | }, | ||
3317 | { | ||
3318 | /* fixed_bit_values */ | ||
3319 | -1ULL, | ||
3320 | 0x0816000000000000ULL, | ||
3321 | -1ULL, | ||
3322 | -1ULL, | ||
3323 | -1ULL | ||
3324 | } | ||
3325 | }, | ||
3326 | { "lb", TILE_OPC_LB, 0x12 /* pipes */, 2 /* num_operands */, | ||
3327 | TREG_ZERO, /* implicitly_written_register */ | ||
3328 | 1, /* can_bundle */ | ||
3329 | { | ||
3330 | /* operands */ | ||
3331 | { 0, }, | ||
3332 | { 9, 10 }, | ||
3333 | { 0, }, | ||
3334 | { 0, }, | ||
3335 | { 23, 15 } | ||
3336 | }, | ||
3337 | { | ||
3338 | /* fixed_bit_masks */ | ||
3339 | 0ULL, | ||
3340 | 0xfffff80000000000ULL, | ||
3341 | 0ULL, | ||
3342 | 0ULL, | ||
3343 | 0x8700000000000000ULL | ||
3344 | }, | ||
3345 | { | ||
3346 | /* fixed_bit_values */ | ||
3347 | -1ULL, | ||
3348 | 0x400b500000000000ULL, | ||
3349 | -1ULL, | ||
3350 | -1ULL, | ||
3351 | 0x8000000000000000ULL | ||
3352 | } | ||
3353 | }, | ||
3354 | { "lb.sn", TILE_OPC_LB_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
3355 | TREG_SN, /* implicitly_written_register */ | ||
3356 | 1, /* can_bundle */ | ||
3357 | { | ||
3358 | /* operands */ | ||
3359 | { 0, }, | ||
3360 | { 9, 10 }, | ||
3361 | { 0, }, | ||
3362 | { 0, }, | ||
3363 | { 0, } | ||
3364 | }, | ||
3365 | { | ||
3366 | /* fixed_bit_masks */ | ||
3367 | 0ULL, | ||
3368 | 0xfffff80000000000ULL, | ||
3369 | 0ULL, | ||
3370 | 0ULL, | ||
3371 | 0ULL | ||
3372 | }, | ||
3373 | { | ||
3374 | /* fixed_bit_values */ | ||
3375 | -1ULL, | ||
3376 | 0x440b500000000000ULL, | ||
3377 | -1ULL, | ||
3378 | -1ULL, | ||
3379 | -1ULL | ||
3380 | } | ||
3381 | }, | ||
3382 | { "lb_u", TILE_OPC_LB_U, 0x12 /* pipes */, 2 /* num_operands */, | ||
3383 | TREG_ZERO, /* implicitly_written_register */ | ||
3384 | 1, /* can_bundle */ | ||
3385 | { | ||
3386 | /* operands */ | ||
3387 | { 0, }, | ||
3388 | { 9, 10 }, | ||
3389 | { 0, }, | ||
3390 | { 0, }, | ||
3391 | { 23, 15 } | ||
3392 | }, | ||
3393 | { | ||
3394 | /* fixed_bit_masks */ | ||
3395 | 0ULL, | ||
3396 | 0xfffff80000000000ULL, | ||
3397 | 0ULL, | ||
3398 | 0ULL, | ||
3399 | 0x8700000000000000ULL | ||
3400 | }, | ||
3401 | { | ||
3402 | /* fixed_bit_values */ | ||
3403 | -1ULL, | ||
3404 | 0x400b580000000000ULL, | ||
3405 | -1ULL, | ||
3406 | -1ULL, | ||
3407 | 0x8100000000000000ULL | ||
3408 | } | ||
3409 | }, | ||
3410 | { "lb_u.sn", TILE_OPC_LB_U_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
3411 | TREG_SN, /* implicitly_written_register */ | ||
3412 | 1, /* can_bundle */ | ||
3413 | { | ||
3414 | /* operands */ | ||
3415 | { 0, }, | ||
3416 | { 9, 10 }, | ||
3417 | { 0, }, | ||
3418 | { 0, }, | ||
3419 | { 0, } | ||
3420 | }, | ||
3421 | { | ||
3422 | /* fixed_bit_masks */ | ||
3423 | 0ULL, | ||
3424 | 0xfffff80000000000ULL, | ||
3425 | 0ULL, | ||
3426 | 0ULL, | ||
3427 | 0ULL | ||
3428 | }, | ||
3429 | { | ||
3430 | /* fixed_bit_values */ | ||
3431 | -1ULL, | ||
3432 | 0x440b580000000000ULL, | ||
3433 | -1ULL, | ||
3434 | -1ULL, | ||
3435 | -1ULL | ||
3436 | } | ||
3437 | }, | ||
3438 | { "lbadd", TILE_OPC_LBADD, 0x2 /* pipes */, 3 /* num_operands */, | ||
3439 | TREG_ZERO, /* implicitly_written_register */ | ||
3440 | 1, /* can_bundle */ | ||
3441 | { | ||
3442 | /* operands */ | ||
3443 | { 0, }, | ||
3444 | { 9, 24, 1 }, | ||
3445 | { 0, }, | ||
3446 | { 0, }, | ||
3447 | { 0, } | ||
3448 | }, | ||
3449 | { | ||
3450 | /* fixed_bit_masks */ | ||
3451 | 0ULL, | ||
3452 | 0xfff8000000000000ULL, | ||
3453 | 0ULL, | ||
3454 | 0ULL, | ||
3455 | 0ULL | ||
3456 | }, | ||
3457 | { | ||
3458 | /* fixed_bit_values */ | ||
3459 | -1ULL, | ||
3460 | 0x30b0000000000000ULL, | ||
3461 | -1ULL, | ||
3462 | -1ULL, | ||
3463 | -1ULL | ||
3464 | } | ||
3465 | }, | ||
3466 | { "lbadd.sn", TILE_OPC_LBADD_SN, 0x2 /* pipes */, 3 /* num_operands */, | ||
3467 | TREG_SN, /* implicitly_written_register */ | ||
3468 | 1, /* can_bundle */ | ||
3469 | { | ||
3470 | /* operands */ | ||
3471 | { 0, }, | ||
3472 | { 9, 24, 1 }, | ||
3473 | { 0, }, | ||
3474 | { 0, }, | ||
3475 | { 0, } | ||
3476 | }, | ||
3477 | { | ||
3478 | /* fixed_bit_masks */ | ||
3479 | 0ULL, | ||
3480 | 0xfff8000000000000ULL, | ||
3481 | 0ULL, | ||
3482 | 0ULL, | ||
3483 | 0ULL | ||
3484 | }, | ||
3485 | { | ||
3486 | /* fixed_bit_values */ | ||
3487 | -1ULL, | ||
3488 | 0x34b0000000000000ULL, | ||
3489 | -1ULL, | ||
3490 | -1ULL, | ||
3491 | -1ULL | ||
3492 | } | ||
3493 | }, | ||
3494 | { "lbadd_u", TILE_OPC_LBADD_U, 0x2 /* pipes */, 3 /* num_operands */, | ||
3495 | TREG_ZERO, /* implicitly_written_register */ | ||
3496 | 1, /* can_bundle */ | ||
3497 | { | ||
3498 | /* operands */ | ||
3499 | { 0, }, | ||
3500 | { 9, 24, 1 }, | ||
3501 | { 0, }, | ||
3502 | { 0, }, | ||
3503 | { 0, } | ||
3504 | }, | ||
3505 | { | ||
3506 | /* fixed_bit_masks */ | ||
3507 | 0ULL, | ||
3508 | 0xfff8000000000000ULL, | ||
3509 | 0ULL, | ||
3510 | 0ULL, | ||
3511 | 0ULL | ||
3512 | }, | ||
3513 | { | ||
3514 | /* fixed_bit_values */ | ||
3515 | -1ULL, | ||
3516 | 0x30b8000000000000ULL, | ||
3517 | -1ULL, | ||
3518 | -1ULL, | ||
3519 | -1ULL | ||
3520 | } | ||
3521 | }, | ||
3522 | { "lbadd_u.sn", TILE_OPC_LBADD_U_SN, 0x2 /* pipes */, 3 /* num_operands */, | ||
3523 | TREG_SN, /* implicitly_written_register */ | ||
3524 | 1, /* can_bundle */ | ||
3525 | { | ||
3526 | /* operands */ | ||
3527 | { 0, }, | ||
3528 | { 9, 24, 1 }, | ||
3529 | { 0, }, | ||
3530 | { 0, }, | ||
3531 | { 0, } | ||
3532 | }, | ||
3533 | { | ||
3534 | /* fixed_bit_masks */ | ||
3535 | 0ULL, | ||
3536 | 0xfff8000000000000ULL, | ||
3537 | 0ULL, | ||
3538 | 0ULL, | ||
3539 | 0ULL | ||
3540 | }, | ||
3541 | { | ||
3542 | /* fixed_bit_values */ | ||
3543 | -1ULL, | ||
3544 | 0x34b8000000000000ULL, | ||
3545 | -1ULL, | ||
3546 | -1ULL, | ||
3547 | -1ULL | ||
3548 | } | ||
3549 | }, | ||
3550 | { "lh", TILE_OPC_LH, 0x12 /* pipes */, 2 /* num_operands */, | ||
3551 | TREG_ZERO, /* implicitly_written_register */ | ||
3552 | 1, /* can_bundle */ | ||
3553 | { | ||
3554 | /* operands */ | ||
3555 | { 0, }, | ||
3556 | { 9, 10 }, | ||
3557 | { 0, }, | ||
3558 | { 0, }, | ||
3559 | { 23, 15 } | ||
3560 | }, | ||
3561 | { | ||
3562 | /* fixed_bit_masks */ | ||
3563 | 0ULL, | ||
3564 | 0xfffff80000000000ULL, | ||
3565 | 0ULL, | ||
3566 | 0ULL, | ||
3567 | 0x8700000000000000ULL | ||
3568 | }, | ||
3569 | { | ||
3570 | /* fixed_bit_values */ | ||
3571 | -1ULL, | ||
3572 | 0x400b600000000000ULL, | ||
3573 | -1ULL, | ||
3574 | -1ULL, | ||
3575 | 0x8200000000000000ULL | ||
3576 | } | ||
3577 | }, | ||
3578 | { "lh.sn", TILE_OPC_LH_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
3579 | TREG_SN, /* implicitly_written_register */ | ||
3580 | 1, /* can_bundle */ | ||
3581 | { | ||
3582 | /* operands */ | ||
3583 | { 0, }, | ||
3584 | { 9, 10 }, | ||
3585 | { 0, }, | ||
3586 | { 0, }, | ||
3587 | { 0, } | ||
3588 | }, | ||
3589 | { | ||
3590 | /* fixed_bit_masks */ | ||
3591 | 0ULL, | ||
3592 | 0xfffff80000000000ULL, | ||
3593 | 0ULL, | ||
3594 | 0ULL, | ||
3595 | 0ULL | ||
3596 | }, | ||
3597 | { | ||
3598 | /* fixed_bit_values */ | ||
3599 | -1ULL, | ||
3600 | 0x440b600000000000ULL, | ||
3601 | -1ULL, | ||
3602 | -1ULL, | ||
3603 | -1ULL | ||
3604 | } | ||
3605 | }, | ||
3606 | { "lh_u", TILE_OPC_LH_U, 0x12 /* pipes */, 2 /* num_operands */, | ||
3607 | TREG_ZERO, /* implicitly_written_register */ | ||
3608 | 1, /* can_bundle */ | ||
3609 | { | ||
3610 | /* operands */ | ||
3611 | { 0, }, | ||
3612 | { 9, 10 }, | ||
3613 | { 0, }, | ||
3614 | { 0, }, | ||
3615 | { 23, 15 } | ||
3616 | }, | ||
3617 | { | ||
3618 | /* fixed_bit_masks */ | ||
3619 | 0ULL, | ||
3620 | 0xfffff80000000000ULL, | ||
3621 | 0ULL, | ||
3622 | 0ULL, | ||
3623 | 0x8700000000000000ULL | ||
3624 | }, | ||
3625 | { | ||
3626 | /* fixed_bit_values */ | ||
3627 | -1ULL, | ||
3628 | 0x400b680000000000ULL, | ||
3629 | -1ULL, | ||
3630 | -1ULL, | ||
3631 | 0x8300000000000000ULL | ||
3632 | } | ||
3633 | }, | ||
3634 | { "lh_u.sn", TILE_OPC_LH_U_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
3635 | TREG_SN, /* implicitly_written_register */ | ||
3636 | 1, /* can_bundle */ | ||
3637 | { | ||
3638 | /* operands */ | ||
3639 | { 0, }, | ||
3640 | { 9, 10 }, | ||
3641 | { 0, }, | ||
3642 | { 0, }, | ||
3643 | { 0, } | ||
3644 | }, | ||
3645 | { | ||
3646 | /* fixed_bit_masks */ | ||
3647 | 0ULL, | ||
3648 | 0xfffff80000000000ULL, | ||
3649 | 0ULL, | ||
3650 | 0ULL, | ||
3651 | 0ULL | ||
3652 | }, | ||
3653 | { | ||
3654 | /* fixed_bit_values */ | ||
3655 | -1ULL, | ||
3656 | 0x440b680000000000ULL, | ||
3657 | -1ULL, | ||
3658 | -1ULL, | ||
3659 | -1ULL | ||
3660 | } | ||
3661 | }, | ||
3662 | { "lhadd", TILE_OPC_LHADD, 0x2 /* pipes */, 3 /* num_operands */, | ||
3663 | TREG_ZERO, /* implicitly_written_register */ | ||
3664 | 1, /* can_bundle */ | ||
3665 | { | ||
3666 | /* operands */ | ||
3667 | { 0, }, | ||
3668 | { 9, 24, 1 }, | ||
3669 | { 0, }, | ||
3670 | { 0, }, | ||
3671 | { 0, } | ||
3672 | }, | ||
3673 | { | ||
3674 | /* fixed_bit_masks */ | ||
3675 | 0ULL, | ||
3676 | 0xfff8000000000000ULL, | ||
3677 | 0ULL, | ||
3678 | 0ULL, | ||
3679 | 0ULL | ||
3680 | }, | ||
3681 | { | ||
3682 | /* fixed_bit_values */ | ||
3683 | -1ULL, | ||
3684 | 0x30c0000000000000ULL, | ||
3685 | -1ULL, | ||
3686 | -1ULL, | ||
3687 | -1ULL | ||
3688 | } | ||
3689 | }, | ||
3690 | { "lhadd.sn", TILE_OPC_LHADD_SN, 0x2 /* pipes */, 3 /* num_operands */, | ||
3691 | TREG_SN, /* implicitly_written_register */ | ||
3692 | 1, /* can_bundle */ | ||
3693 | { | ||
3694 | /* operands */ | ||
3695 | { 0, }, | ||
3696 | { 9, 24, 1 }, | ||
3697 | { 0, }, | ||
3698 | { 0, }, | ||
3699 | { 0, } | ||
3700 | }, | ||
3701 | { | ||
3702 | /* fixed_bit_masks */ | ||
3703 | 0ULL, | ||
3704 | 0xfff8000000000000ULL, | ||
3705 | 0ULL, | ||
3706 | 0ULL, | ||
3707 | 0ULL | ||
3708 | }, | ||
3709 | { | ||
3710 | /* fixed_bit_values */ | ||
3711 | -1ULL, | ||
3712 | 0x34c0000000000000ULL, | ||
3713 | -1ULL, | ||
3714 | -1ULL, | ||
3715 | -1ULL | ||
3716 | } | ||
3717 | }, | ||
3718 | { "lhadd_u", TILE_OPC_LHADD_U, 0x2 /* pipes */, 3 /* num_operands */, | ||
3719 | TREG_ZERO, /* implicitly_written_register */ | ||
3720 | 1, /* can_bundle */ | ||
3721 | { | ||
3722 | /* operands */ | ||
3723 | { 0, }, | ||
3724 | { 9, 24, 1 }, | ||
3725 | { 0, }, | ||
3726 | { 0, }, | ||
3727 | { 0, } | ||
3728 | }, | ||
3729 | { | ||
3730 | /* fixed_bit_masks */ | ||
3731 | 0ULL, | ||
3732 | 0xfff8000000000000ULL, | ||
3733 | 0ULL, | ||
3734 | 0ULL, | ||
3735 | 0ULL | ||
3736 | }, | ||
3737 | { | ||
3738 | /* fixed_bit_values */ | ||
3739 | -1ULL, | ||
3740 | 0x30c8000000000000ULL, | ||
3741 | -1ULL, | ||
3742 | -1ULL, | ||
3743 | -1ULL | ||
3744 | } | ||
3745 | }, | ||
3746 | { "lhadd_u.sn", TILE_OPC_LHADD_U_SN, 0x2 /* pipes */, 3 /* num_operands */, | ||
3747 | TREG_SN, /* implicitly_written_register */ | ||
3748 | 1, /* can_bundle */ | ||
3749 | { | ||
3750 | /* operands */ | ||
3751 | { 0, }, | ||
3752 | { 9, 24, 1 }, | ||
3753 | { 0, }, | ||
3754 | { 0, }, | ||
3755 | { 0, } | ||
3756 | }, | ||
3757 | { | ||
3758 | /* fixed_bit_masks */ | ||
3759 | 0ULL, | ||
3760 | 0xfff8000000000000ULL, | ||
3761 | 0ULL, | ||
3762 | 0ULL, | ||
3763 | 0ULL | ||
3764 | }, | ||
3765 | { | ||
3766 | /* fixed_bit_values */ | ||
3767 | -1ULL, | ||
3768 | 0x34c8000000000000ULL, | ||
3769 | -1ULL, | ||
3770 | -1ULL, | ||
3771 | -1ULL | ||
3772 | } | ||
3773 | }, | ||
3774 | { "lnk", TILE_OPC_LNK, 0x2 /* pipes */, 1 /* num_operands */, | ||
3775 | TREG_ZERO, /* implicitly_written_register */ | ||
3776 | 1, /* can_bundle */ | ||
3777 | { | ||
3778 | /* operands */ | ||
3779 | { 0, }, | ||
3780 | { 9 }, | ||
3781 | { 0, }, | ||
3782 | { 0, }, | ||
3783 | { 0, } | ||
3784 | }, | ||
3785 | { | ||
3786 | /* fixed_bit_masks */ | ||
3787 | 0ULL, | ||
3788 | 0xfffe000000000000ULL, | ||
3789 | 0ULL, | ||
3790 | 0ULL, | ||
3791 | 0ULL | ||
3792 | }, | ||
3793 | { | ||
3794 | /* fixed_bit_values */ | ||
3795 | -1ULL, | ||
3796 | 0x081a000000000000ULL, | ||
3797 | -1ULL, | ||
3798 | -1ULL, | ||
3799 | -1ULL | ||
3800 | } | ||
3801 | }, | ||
3802 | { "lnk.sn", TILE_OPC_LNK_SN, 0x2 /* pipes */, 1 /* num_operands */, | ||
3803 | TREG_SN, /* implicitly_written_register */ | ||
3804 | 1, /* can_bundle */ | ||
3805 | { | ||
3806 | /* operands */ | ||
3807 | { 0, }, | ||
3808 | { 9 }, | ||
3809 | { 0, }, | ||
3810 | { 0, }, | ||
3811 | { 0, } | ||
3812 | }, | ||
3813 | { | ||
3814 | /* fixed_bit_masks */ | ||
3815 | 0ULL, | ||
3816 | 0xfffe000000000000ULL, | ||
3817 | 0ULL, | ||
3818 | 0ULL, | ||
3819 | 0ULL | ||
3820 | }, | ||
3821 | { | ||
3822 | /* fixed_bit_values */ | ||
3823 | -1ULL, | ||
3824 | 0x0c1a000000000000ULL, | ||
3825 | -1ULL, | ||
3826 | -1ULL, | ||
3827 | -1ULL | ||
3828 | } | ||
3829 | }, | ||
3830 | { "lw", TILE_OPC_LW, 0x12 /* pipes */, 2 /* num_operands */, | ||
3831 | TREG_ZERO, /* implicitly_written_register */ | ||
3832 | 1, /* can_bundle */ | ||
3833 | { | ||
3834 | /* operands */ | ||
3835 | { 0, }, | ||
3836 | { 9, 10 }, | ||
3837 | { 0, }, | ||
3838 | { 0, }, | ||
3839 | { 23, 15 } | ||
3840 | }, | ||
3841 | { | ||
3842 | /* fixed_bit_masks */ | ||
3843 | 0ULL, | ||
3844 | 0xfffff80000000000ULL, | ||
3845 | 0ULL, | ||
3846 | 0ULL, | ||
3847 | 0x8700000000000000ULL | ||
3848 | }, | ||
3849 | { | ||
3850 | /* fixed_bit_values */ | ||
3851 | -1ULL, | ||
3852 | 0x400b700000000000ULL, | ||
3853 | -1ULL, | ||
3854 | -1ULL, | ||
3855 | 0x8400000000000000ULL | ||
3856 | } | ||
3857 | }, | ||
3858 | { "lw.sn", TILE_OPC_LW_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
3859 | TREG_SN, /* implicitly_written_register */ | ||
3860 | 1, /* can_bundle */ | ||
3861 | { | ||
3862 | /* operands */ | ||
3863 | { 0, }, | ||
3864 | { 9, 10 }, | ||
3865 | { 0, }, | ||
3866 | { 0, }, | ||
3867 | { 0, } | ||
3868 | }, | ||
3869 | { | ||
3870 | /* fixed_bit_masks */ | ||
3871 | 0ULL, | ||
3872 | 0xfffff80000000000ULL, | ||
3873 | 0ULL, | ||
3874 | 0ULL, | ||
3875 | 0ULL | ||
3876 | }, | ||
3877 | { | ||
3878 | /* fixed_bit_values */ | ||
3879 | -1ULL, | ||
3880 | 0x440b700000000000ULL, | ||
3881 | -1ULL, | ||
3882 | -1ULL, | ||
3883 | -1ULL | ||
3884 | } | ||
3885 | }, | ||
3886 | { "lw_na", TILE_OPC_LW_NA, 0x2 /* pipes */, 2 /* num_operands */, | ||
3887 | TREG_ZERO, /* implicitly_written_register */ | ||
3888 | 1, /* can_bundle */ | ||
3889 | { | ||
3890 | /* operands */ | ||
3891 | { 0, }, | ||
3892 | { 9, 10 }, | ||
3893 | { 0, }, | ||
3894 | { 0, }, | ||
3895 | { 0, } | ||
3896 | }, | ||
3897 | { | ||
3898 | /* fixed_bit_masks */ | ||
3899 | 0ULL, | ||
3900 | 0xfffff80000000000ULL, | ||
3901 | 0ULL, | ||
3902 | 0ULL, | ||
3903 | 0ULL | ||
3904 | }, | ||
3905 | { | ||
3906 | /* fixed_bit_values */ | ||
3907 | -1ULL, | ||
3908 | 0x400bc00000000000ULL, | ||
3909 | -1ULL, | ||
3910 | -1ULL, | ||
3911 | -1ULL | ||
3912 | } | ||
3913 | }, | ||
3914 | { "lw_na.sn", TILE_OPC_LW_NA_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
3915 | TREG_SN, /* implicitly_written_register */ | ||
3916 | 1, /* can_bundle */ | ||
3917 | { | ||
3918 | /* operands */ | ||
3919 | { 0, }, | ||
3920 | { 9, 10 }, | ||
3921 | { 0, }, | ||
3922 | { 0, }, | ||
3923 | { 0, } | ||
3924 | }, | ||
3925 | { | ||
3926 | /* fixed_bit_masks */ | ||
3927 | 0ULL, | ||
3928 | 0xfffff80000000000ULL, | ||
3929 | 0ULL, | ||
3930 | 0ULL, | ||
3931 | 0ULL | ||
3932 | }, | ||
3933 | { | ||
3934 | /* fixed_bit_values */ | ||
3935 | -1ULL, | ||
3936 | 0x440bc00000000000ULL, | ||
3937 | -1ULL, | ||
3938 | -1ULL, | ||
3939 | -1ULL | ||
3940 | } | ||
3941 | }, | ||
3942 | { "lwadd", TILE_OPC_LWADD, 0x2 /* pipes */, 3 /* num_operands */, | ||
3943 | TREG_ZERO, /* implicitly_written_register */ | ||
3944 | 1, /* can_bundle */ | ||
3945 | { | ||
3946 | /* operands */ | ||
3947 | { 0, }, | ||
3948 | { 9, 24, 1 }, | ||
3949 | { 0, }, | ||
3950 | { 0, }, | ||
3951 | { 0, } | ||
3952 | }, | ||
3953 | { | ||
3954 | /* fixed_bit_masks */ | ||
3955 | 0ULL, | ||
3956 | 0xfff8000000000000ULL, | ||
3957 | 0ULL, | ||
3958 | 0ULL, | ||
3959 | 0ULL | ||
3960 | }, | ||
3961 | { | ||
3962 | /* fixed_bit_values */ | ||
3963 | -1ULL, | ||
3964 | 0x30d0000000000000ULL, | ||
3965 | -1ULL, | ||
3966 | -1ULL, | ||
3967 | -1ULL | ||
3968 | } | ||
3969 | }, | ||
3970 | { "lwadd.sn", TILE_OPC_LWADD_SN, 0x2 /* pipes */, 3 /* num_operands */, | ||
3971 | TREG_SN, /* implicitly_written_register */ | ||
3972 | 1, /* can_bundle */ | ||
3973 | { | ||
3974 | /* operands */ | ||
3975 | { 0, }, | ||
3976 | { 9, 24, 1 }, | ||
3977 | { 0, }, | ||
3978 | { 0, }, | ||
3979 | { 0, } | ||
3980 | }, | ||
3981 | { | ||
3982 | /* fixed_bit_masks */ | ||
3983 | 0ULL, | ||
3984 | 0xfff8000000000000ULL, | ||
3985 | 0ULL, | ||
3986 | 0ULL, | ||
3987 | 0ULL | ||
3988 | }, | ||
3989 | { | ||
3990 | /* fixed_bit_values */ | ||
3991 | -1ULL, | ||
3992 | 0x34d0000000000000ULL, | ||
3993 | -1ULL, | ||
3994 | -1ULL, | ||
3995 | -1ULL | ||
3996 | } | ||
3997 | }, | ||
3998 | { "lwadd_na", TILE_OPC_LWADD_NA, 0x2 /* pipes */, 3 /* num_operands */, | ||
3999 | TREG_ZERO, /* implicitly_written_register */ | ||
4000 | 1, /* can_bundle */ | ||
4001 | { | ||
4002 | /* operands */ | ||
4003 | { 0, }, | ||
4004 | { 9, 24, 1 }, | ||
4005 | { 0, }, | ||
4006 | { 0, }, | ||
4007 | { 0, } | ||
4008 | }, | ||
4009 | { | ||
4010 | /* fixed_bit_masks */ | ||
4011 | 0ULL, | ||
4012 | 0xfff8000000000000ULL, | ||
4013 | 0ULL, | ||
4014 | 0ULL, | ||
4015 | 0ULL | ||
4016 | }, | ||
4017 | { | ||
4018 | /* fixed_bit_values */ | ||
4019 | -1ULL, | ||
4020 | 0x30d8000000000000ULL, | ||
4021 | -1ULL, | ||
4022 | -1ULL, | ||
4023 | -1ULL | ||
4024 | } | ||
4025 | }, | ||
4026 | { "lwadd_na.sn", TILE_OPC_LWADD_NA_SN, 0x2 /* pipes */, 3 /* num_operands */, | ||
4027 | TREG_SN, /* implicitly_written_register */ | ||
4028 | 1, /* can_bundle */ | ||
4029 | { | ||
4030 | /* operands */ | ||
4031 | { 0, }, | ||
4032 | { 9, 24, 1 }, | ||
4033 | { 0, }, | ||
4034 | { 0, }, | ||
4035 | { 0, } | ||
4036 | }, | ||
4037 | { | ||
4038 | /* fixed_bit_masks */ | ||
4039 | 0ULL, | ||
4040 | 0xfff8000000000000ULL, | ||
4041 | 0ULL, | ||
4042 | 0ULL, | ||
4043 | 0ULL | ||
4044 | }, | ||
4045 | { | ||
4046 | /* fixed_bit_values */ | ||
4047 | -1ULL, | ||
4048 | 0x34d8000000000000ULL, | ||
4049 | -1ULL, | ||
4050 | -1ULL, | ||
4051 | -1ULL | ||
4052 | } | ||
4053 | }, | ||
4054 | { "maxb_u", TILE_OPC_MAXB_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
4055 | TREG_ZERO, /* implicitly_written_register */ | ||
4056 | 1, /* can_bundle */ | ||
4057 | { | ||
4058 | /* operands */ | ||
4059 | { 7, 8, 16 }, | ||
4060 | { 9, 10, 17 }, | ||
4061 | { 0, }, | ||
4062 | { 0, }, | ||
4063 | { 0, } | ||
4064 | }, | ||
4065 | { | ||
4066 | /* fixed_bit_masks */ | ||
4067 | 0x800000007ffc0000ULL, | ||
4068 | 0xfffe000000000000ULL, | ||
4069 | 0ULL, | ||
4070 | 0ULL, | ||
4071 | 0ULL | ||
4072 | }, | ||
4073 | { | ||
4074 | /* fixed_bit_values */ | ||
4075 | 0x00000000003c0000ULL, | ||
4076 | 0x081c000000000000ULL, | ||
4077 | -1ULL, | ||
4078 | -1ULL, | ||
4079 | -1ULL | ||
4080 | } | ||
4081 | }, | ||
4082 | { "maxb_u.sn", TILE_OPC_MAXB_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4083 | TREG_SN, /* implicitly_written_register */ | ||
4084 | 1, /* can_bundle */ | ||
4085 | { | ||
4086 | /* operands */ | ||
4087 | { 7, 8, 16 }, | ||
4088 | { 9, 10, 17 }, | ||
4089 | { 0, }, | ||
4090 | { 0, }, | ||
4091 | { 0, } | ||
4092 | }, | ||
4093 | { | ||
4094 | /* fixed_bit_masks */ | ||
4095 | 0x800000007ffc0000ULL, | ||
4096 | 0xfffe000000000000ULL, | ||
4097 | 0ULL, | ||
4098 | 0ULL, | ||
4099 | 0ULL | ||
4100 | }, | ||
4101 | { | ||
4102 | /* fixed_bit_values */ | ||
4103 | 0x00000000083c0000ULL, | ||
4104 | 0x0c1c000000000000ULL, | ||
4105 | -1ULL, | ||
4106 | -1ULL, | ||
4107 | -1ULL | ||
4108 | } | ||
4109 | }, | ||
4110 | { "maxh", TILE_OPC_MAXH, 0x3 /* pipes */, 3 /* num_operands */, | ||
4111 | TREG_ZERO, /* implicitly_written_register */ | ||
4112 | 1, /* can_bundle */ | ||
4113 | { | ||
4114 | /* operands */ | ||
4115 | { 7, 8, 16 }, | ||
4116 | { 9, 10, 17 }, | ||
4117 | { 0, }, | ||
4118 | { 0, }, | ||
4119 | { 0, } | ||
4120 | }, | ||
4121 | { | ||
4122 | /* fixed_bit_masks */ | ||
4123 | 0x800000007ffc0000ULL, | ||
4124 | 0xfffe000000000000ULL, | ||
4125 | 0ULL, | ||
4126 | 0ULL, | ||
4127 | 0ULL | ||
4128 | }, | ||
4129 | { | ||
4130 | /* fixed_bit_values */ | ||
4131 | 0x0000000000400000ULL, | ||
4132 | 0x081e000000000000ULL, | ||
4133 | -1ULL, | ||
4134 | -1ULL, | ||
4135 | -1ULL | ||
4136 | } | ||
4137 | }, | ||
4138 | { "maxh.sn", TILE_OPC_MAXH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4139 | TREG_SN, /* implicitly_written_register */ | ||
4140 | 1, /* can_bundle */ | ||
4141 | { | ||
4142 | /* operands */ | ||
4143 | { 7, 8, 16 }, | ||
4144 | { 9, 10, 17 }, | ||
4145 | { 0, }, | ||
4146 | { 0, }, | ||
4147 | { 0, } | ||
4148 | }, | ||
4149 | { | ||
4150 | /* fixed_bit_masks */ | ||
4151 | 0x800000007ffc0000ULL, | ||
4152 | 0xfffe000000000000ULL, | ||
4153 | 0ULL, | ||
4154 | 0ULL, | ||
4155 | 0ULL | ||
4156 | }, | ||
4157 | { | ||
4158 | /* fixed_bit_values */ | ||
4159 | 0x0000000008400000ULL, | ||
4160 | 0x0c1e000000000000ULL, | ||
4161 | -1ULL, | ||
4162 | -1ULL, | ||
4163 | -1ULL | ||
4164 | } | ||
4165 | }, | ||
4166 | { "maxib_u", TILE_OPC_MAXIB_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
4167 | TREG_ZERO, /* implicitly_written_register */ | ||
4168 | 1, /* can_bundle */ | ||
4169 | { | ||
4170 | /* operands */ | ||
4171 | { 7, 8, 0 }, | ||
4172 | { 9, 10, 1 }, | ||
4173 | { 0, }, | ||
4174 | { 0, }, | ||
4175 | { 0, } | ||
4176 | }, | ||
4177 | { | ||
4178 | /* fixed_bit_masks */ | ||
4179 | 0x800000007ff00000ULL, | ||
4180 | 0xfff8000000000000ULL, | ||
4181 | 0ULL, | ||
4182 | 0ULL, | ||
4183 | 0ULL | ||
4184 | }, | ||
4185 | { | ||
4186 | /* fixed_bit_values */ | ||
4187 | 0x0000000040400000ULL, | ||
4188 | 0x3028000000000000ULL, | ||
4189 | -1ULL, | ||
4190 | -1ULL, | ||
4191 | -1ULL | ||
4192 | } | ||
4193 | }, | ||
4194 | { "maxib_u.sn", TILE_OPC_MAXIB_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4195 | TREG_SN, /* implicitly_written_register */ | ||
4196 | 1, /* can_bundle */ | ||
4197 | { | ||
4198 | /* operands */ | ||
4199 | { 7, 8, 0 }, | ||
4200 | { 9, 10, 1 }, | ||
4201 | { 0, }, | ||
4202 | { 0, }, | ||
4203 | { 0, } | ||
4204 | }, | ||
4205 | { | ||
4206 | /* fixed_bit_masks */ | ||
4207 | 0x800000007ff00000ULL, | ||
4208 | 0xfff8000000000000ULL, | ||
4209 | 0ULL, | ||
4210 | 0ULL, | ||
4211 | 0ULL | ||
4212 | }, | ||
4213 | { | ||
4214 | /* fixed_bit_values */ | ||
4215 | 0x0000000048400000ULL, | ||
4216 | 0x3428000000000000ULL, | ||
4217 | -1ULL, | ||
4218 | -1ULL, | ||
4219 | -1ULL | ||
4220 | } | ||
4221 | }, | ||
4222 | { "maxih", TILE_OPC_MAXIH, 0x3 /* pipes */, 3 /* num_operands */, | ||
4223 | TREG_ZERO, /* implicitly_written_register */ | ||
4224 | 1, /* can_bundle */ | ||
4225 | { | ||
4226 | /* operands */ | ||
4227 | { 7, 8, 0 }, | ||
4228 | { 9, 10, 1 }, | ||
4229 | { 0, }, | ||
4230 | { 0, }, | ||
4231 | { 0, } | ||
4232 | }, | ||
4233 | { | ||
4234 | /* fixed_bit_masks */ | ||
4235 | 0x800000007ff00000ULL, | ||
4236 | 0xfff8000000000000ULL, | ||
4237 | 0ULL, | ||
4238 | 0ULL, | ||
4239 | 0ULL | ||
4240 | }, | ||
4241 | { | ||
4242 | /* fixed_bit_values */ | ||
4243 | 0x0000000040500000ULL, | ||
4244 | 0x3030000000000000ULL, | ||
4245 | -1ULL, | ||
4246 | -1ULL, | ||
4247 | -1ULL | ||
4248 | } | ||
4249 | }, | ||
4250 | { "maxih.sn", TILE_OPC_MAXIH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4251 | TREG_SN, /* implicitly_written_register */ | ||
4252 | 1, /* can_bundle */ | ||
4253 | { | ||
4254 | /* operands */ | ||
4255 | { 7, 8, 0 }, | ||
4256 | { 9, 10, 1 }, | ||
4257 | { 0, }, | ||
4258 | { 0, }, | ||
4259 | { 0, } | ||
4260 | }, | ||
4261 | { | ||
4262 | /* fixed_bit_masks */ | ||
4263 | 0x800000007ff00000ULL, | ||
4264 | 0xfff8000000000000ULL, | ||
4265 | 0ULL, | ||
4266 | 0ULL, | ||
4267 | 0ULL | ||
4268 | }, | ||
4269 | { | ||
4270 | /* fixed_bit_values */ | ||
4271 | 0x0000000048500000ULL, | ||
4272 | 0x3430000000000000ULL, | ||
4273 | -1ULL, | ||
4274 | -1ULL, | ||
4275 | -1ULL | ||
4276 | } | ||
4277 | }, | ||
4278 | { "mf", TILE_OPC_MF, 0x2 /* pipes */, 0 /* num_operands */, | ||
4279 | TREG_ZERO, /* implicitly_written_register */ | ||
4280 | 1, /* can_bundle */ | ||
4281 | { | ||
4282 | /* operands */ | ||
4283 | { 0, }, | ||
4284 | { }, | ||
4285 | { 0, }, | ||
4286 | { 0, }, | ||
4287 | { 0, } | ||
4288 | }, | ||
4289 | { | ||
4290 | /* fixed_bit_masks */ | ||
4291 | 0ULL, | ||
4292 | 0xfbfff80000000000ULL, | ||
4293 | 0ULL, | ||
4294 | 0ULL, | ||
4295 | 0ULL | ||
4296 | }, | ||
4297 | { | ||
4298 | /* fixed_bit_values */ | ||
4299 | -1ULL, | ||
4300 | 0x400b780000000000ULL, | ||
4301 | -1ULL, | ||
4302 | -1ULL, | ||
4303 | -1ULL | ||
4304 | } | ||
4305 | }, | ||
4306 | { "mfspr", TILE_OPC_MFSPR, 0x2 /* pipes */, 2 /* num_operands */, | ||
4307 | TREG_ZERO, /* implicitly_written_register */ | ||
4308 | 1, /* can_bundle */ | ||
4309 | { | ||
4310 | /* operands */ | ||
4311 | { 0, }, | ||
4312 | { 9, 25 }, | ||
4313 | { 0, }, | ||
4314 | { 0, }, | ||
4315 | { 0, } | ||
4316 | }, | ||
4317 | { | ||
4318 | /* fixed_bit_masks */ | ||
4319 | 0ULL, | ||
4320 | 0xfbf8000000000000ULL, | ||
4321 | 0ULL, | ||
4322 | 0ULL, | ||
4323 | 0ULL | ||
4324 | }, | ||
4325 | { | ||
4326 | /* fixed_bit_values */ | ||
4327 | -1ULL, | ||
4328 | 0x3038000000000000ULL, | ||
4329 | -1ULL, | ||
4330 | -1ULL, | ||
4331 | -1ULL | ||
4332 | } | ||
4333 | }, | ||
4334 | { "minb_u", TILE_OPC_MINB_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
4335 | TREG_ZERO, /* implicitly_written_register */ | ||
4336 | 1, /* can_bundle */ | ||
4337 | { | ||
4338 | /* operands */ | ||
4339 | { 7, 8, 16 }, | ||
4340 | { 9, 10, 17 }, | ||
4341 | { 0, }, | ||
4342 | { 0, }, | ||
4343 | { 0, } | ||
4344 | }, | ||
4345 | { | ||
4346 | /* fixed_bit_masks */ | ||
4347 | 0x800000007ffc0000ULL, | ||
4348 | 0xfffe000000000000ULL, | ||
4349 | 0ULL, | ||
4350 | 0ULL, | ||
4351 | 0ULL | ||
4352 | }, | ||
4353 | { | ||
4354 | /* fixed_bit_values */ | ||
4355 | 0x0000000000440000ULL, | ||
4356 | 0x0820000000000000ULL, | ||
4357 | -1ULL, | ||
4358 | -1ULL, | ||
4359 | -1ULL | ||
4360 | } | ||
4361 | }, | ||
4362 | { "minb_u.sn", TILE_OPC_MINB_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4363 | TREG_SN, /* implicitly_written_register */ | ||
4364 | 1, /* can_bundle */ | ||
4365 | { | ||
4366 | /* operands */ | ||
4367 | { 7, 8, 16 }, | ||
4368 | { 9, 10, 17 }, | ||
4369 | { 0, }, | ||
4370 | { 0, }, | ||
4371 | { 0, } | ||
4372 | }, | ||
4373 | { | ||
4374 | /* fixed_bit_masks */ | ||
4375 | 0x800000007ffc0000ULL, | ||
4376 | 0xfffe000000000000ULL, | ||
4377 | 0ULL, | ||
4378 | 0ULL, | ||
4379 | 0ULL | ||
4380 | }, | ||
4381 | { | ||
4382 | /* fixed_bit_values */ | ||
4383 | 0x0000000008440000ULL, | ||
4384 | 0x0c20000000000000ULL, | ||
4385 | -1ULL, | ||
4386 | -1ULL, | ||
4387 | -1ULL | ||
4388 | } | ||
4389 | }, | ||
4390 | { "minh", TILE_OPC_MINH, 0x3 /* pipes */, 3 /* num_operands */, | ||
4391 | TREG_ZERO, /* implicitly_written_register */ | ||
4392 | 1, /* can_bundle */ | ||
4393 | { | ||
4394 | /* operands */ | ||
4395 | { 7, 8, 16 }, | ||
4396 | { 9, 10, 17 }, | ||
4397 | { 0, }, | ||
4398 | { 0, }, | ||
4399 | { 0, } | ||
4400 | }, | ||
4401 | { | ||
4402 | /* fixed_bit_masks */ | ||
4403 | 0x800000007ffc0000ULL, | ||
4404 | 0xfffe000000000000ULL, | ||
4405 | 0ULL, | ||
4406 | 0ULL, | ||
4407 | 0ULL | ||
4408 | }, | ||
4409 | { | ||
4410 | /* fixed_bit_values */ | ||
4411 | 0x0000000000480000ULL, | ||
4412 | 0x0822000000000000ULL, | ||
4413 | -1ULL, | ||
4414 | -1ULL, | ||
4415 | -1ULL | ||
4416 | } | ||
4417 | }, | ||
4418 | { "minh.sn", TILE_OPC_MINH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4419 | TREG_SN, /* implicitly_written_register */ | ||
4420 | 1, /* can_bundle */ | ||
4421 | { | ||
4422 | /* operands */ | ||
4423 | { 7, 8, 16 }, | ||
4424 | { 9, 10, 17 }, | ||
4425 | { 0, }, | ||
4426 | { 0, }, | ||
4427 | { 0, } | ||
4428 | }, | ||
4429 | { | ||
4430 | /* fixed_bit_masks */ | ||
4431 | 0x800000007ffc0000ULL, | ||
4432 | 0xfffe000000000000ULL, | ||
4433 | 0ULL, | ||
4434 | 0ULL, | ||
4435 | 0ULL | ||
4436 | }, | ||
4437 | { | ||
4438 | /* fixed_bit_values */ | ||
4439 | 0x0000000008480000ULL, | ||
4440 | 0x0c22000000000000ULL, | ||
4441 | -1ULL, | ||
4442 | -1ULL, | ||
4443 | -1ULL | ||
4444 | } | ||
4445 | }, | ||
4446 | { "minib_u", TILE_OPC_MINIB_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
4447 | TREG_ZERO, /* implicitly_written_register */ | ||
4448 | 1, /* can_bundle */ | ||
4449 | { | ||
4450 | /* operands */ | ||
4451 | { 7, 8, 0 }, | ||
4452 | { 9, 10, 1 }, | ||
4453 | { 0, }, | ||
4454 | { 0, }, | ||
4455 | { 0, } | ||
4456 | }, | ||
4457 | { | ||
4458 | /* fixed_bit_masks */ | ||
4459 | 0x800000007ff00000ULL, | ||
4460 | 0xfff8000000000000ULL, | ||
4461 | 0ULL, | ||
4462 | 0ULL, | ||
4463 | 0ULL | ||
4464 | }, | ||
4465 | { | ||
4466 | /* fixed_bit_values */ | ||
4467 | 0x0000000040600000ULL, | ||
4468 | 0x3040000000000000ULL, | ||
4469 | -1ULL, | ||
4470 | -1ULL, | ||
4471 | -1ULL | ||
4472 | } | ||
4473 | }, | ||
4474 | { "minib_u.sn", TILE_OPC_MINIB_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4475 | TREG_SN, /* implicitly_written_register */ | ||
4476 | 1, /* can_bundle */ | ||
4477 | { | ||
4478 | /* operands */ | ||
4479 | { 7, 8, 0 }, | ||
4480 | { 9, 10, 1 }, | ||
4481 | { 0, }, | ||
4482 | { 0, }, | ||
4483 | { 0, } | ||
4484 | }, | ||
4485 | { | ||
4486 | /* fixed_bit_masks */ | ||
4487 | 0x800000007ff00000ULL, | ||
4488 | 0xfff8000000000000ULL, | ||
4489 | 0ULL, | ||
4490 | 0ULL, | ||
4491 | 0ULL | ||
4492 | }, | ||
4493 | { | ||
4494 | /* fixed_bit_values */ | ||
4495 | 0x0000000048600000ULL, | ||
4496 | 0x3440000000000000ULL, | ||
4497 | -1ULL, | ||
4498 | -1ULL, | ||
4499 | -1ULL | ||
4500 | } | ||
4501 | }, | ||
4502 | { "minih", TILE_OPC_MINIH, 0x3 /* pipes */, 3 /* num_operands */, | ||
4503 | TREG_ZERO, /* implicitly_written_register */ | ||
4504 | 1, /* can_bundle */ | ||
4505 | { | ||
4506 | /* operands */ | ||
4507 | { 7, 8, 0 }, | ||
4508 | { 9, 10, 1 }, | ||
4509 | { 0, }, | ||
4510 | { 0, }, | ||
4511 | { 0, } | ||
4512 | }, | ||
4513 | { | ||
4514 | /* fixed_bit_masks */ | ||
4515 | 0x800000007ff00000ULL, | ||
4516 | 0xfff8000000000000ULL, | ||
4517 | 0ULL, | ||
4518 | 0ULL, | ||
4519 | 0ULL | ||
4520 | }, | ||
4521 | { | ||
4522 | /* fixed_bit_values */ | ||
4523 | 0x0000000040700000ULL, | ||
4524 | 0x3048000000000000ULL, | ||
4525 | -1ULL, | ||
4526 | -1ULL, | ||
4527 | -1ULL | ||
4528 | } | ||
4529 | }, | ||
4530 | { "minih.sn", TILE_OPC_MINIH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4531 | TREG_SN, /* implicitly_written_register */ | ||
4532 | 1, /* can_bundle */ | ||
4533 | { | ||
4534 | /* operands */ | ||
4535 | { 7, 8, 0 }, | ||
4536 | { 9, 10, 1 }, | ||
4537 | { 0, }, | ||
4538 | { 0, }, | ||
4539 | { 0, } | ||
4540 | }, | ||
4541 | { | ||
4542 | /* fixed_bit_masks */ | ||
4543 | 0x800000007ff00000ULL, | ||
4544 | 0xfff8000000000000ULL, | ||
4545 | 0ULL, | ||
4546 | 0ULL, | ||
4547 | 0ULL | ||
4548 | }, | ||
4549 | { | ||
4550 | /* fixed_bit_values */ | ||
4551 | 0x0000000048700000ULL, | ||
4552 | 0x3448000000000000ULL, | ||
4553 | -1ULL, | ||
4554 | -1ULL, | ||
4555 | -1ULL | ||
4556 | } | ||
4557 | }, | ||
4558 | { "mm", TILE_OPC_MM, 0x3 /* pipes */, 5 /* num_operands */, | ||
4559 | TREG_ZERO, /* implicitly_written_register */ | ||
4560 | 1, /* can_bundle */ | ||
4561 | { | ||
4562 | /* operands */ | ||
4563 | { 7, 8, 16, 26, 27 }, | ||
4564 | { 9, 10, 17, 28, 29 }, | ||
4565 | { 0, }, | ||
4566 | { 0, }, | ||
4567 | { 0, } | ||
4568 | }, | ||
4569 | { | ||
4570 | /* fixed_bit_masks */ | ||
4571 | 0x8000000070000000ULL, | ||
4572 | 0xf800000000000000ULL, | ||
4573 | 0ULL, | ||
4574 | 0ULL, | ||
4575 | 0ULL | ||
4576 | }, | ||
4577 | { | ||
4578 | /* fixed_bit_values */ | ||
4579 | 0x0000000060000000ULL, | ||
4580 | 0x3800000000000000ULL, | ||
4581 | -1ULL, | ||
4582 | -1ULL, | ||
4583 | -1ULL | ||
4584 | } | ||
4585 | }, | ||
4586 | { "mnz", TILE_OPC_MNZ, 0xf /* pipes */, 3 /* num_operands */, | ||
4587 | TREG_ZERO, /* implicitly_written_register */ | ||
4588 | 1, /* can_bundle */ | ||
4589 | { | ||
4590 | /* operands */ | ||
4591 | { 7, 8, 16 }, | ||
4592 | { 9, 10, 17 }, | ||
4593 | { 11, 12, 18 }, | ||
4594 | { 13, 14, 19 }, | ||
4595 | { 0, } | ||
4596 | }, | ||
4597 | { | ||
4598 | /* fixed_bit_masks */ | ||
4599 | 0x800000007ffc0000ULL, | ||
4600 | 0xfffe000000000000ULL, | ||
4601 | 0x80000000780c0000ULL, | ||
4602 | 0xf806000000000000ULL, | ||
4603 | 0ULL | ||
4604 | }, | ||
4605 | { | ||
4606 | /* fixed_bit_values */ | ||
4607 | 0x0000000000540000ULL, | ||
4608 | 0x0828000000000000ULL, | ||
4609 | 0x8000000010000000ULL, | ||
4610 | 0x9002000000000000ULL, | ||
4611 | -1ULL | ||
4612 | } | ||
4613 | }, | ||
4614 | { "mnz.sn", TILE_OPC_MNZ_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4615 | TREG_SN, /* implicitly_written_register */ | ||
4616 | 1, /* can_bundle */ | ||
4617 | { | ||
4618 | /* operands */ | ||
4619 | { 7, 8, 16 }, | ||
4620 | { 9, 10, 17 }, | ||
4621 | { 0, }, | ||
4622 | { 0, }, | ||
4623 | { 0, } | ||
4624 | }, | ||
4625 | { | ||
4626 | /* fixed_bit_masks */ | ||
4627 | 0x800000007ffc0000ULL, | ||
4628 | 0xfffe000000000000ULL, | ||
4629 | 0ULL, | ||
4630 | 0ULL, | ||
4631 | 0ULL | ||
4632 | }, | ||
4633 | { | ||
4634 | /* fixed_bit_values */ | ||
4635 | 0x0000000008540000ULL, | ||
4636 | 0x0c28000000000000ULL, | ||
4637 | -1ULL, | ||
4638 | -1ULL, | ||
4639 | -1ULL | ||
4640 | } | ||
4641 | }, | ||
4642 | { "mnzb", TILE_OPC_MNZB, 0x3 /* pipes */, 3 /* num_operands */, | ||
4643 | TREG_ZERO, /* implicitly_written_register */ | ||
4644 | 1, /* can_bundle */ | ||
4645 | { | ||
4646 | /* operands */ | ||
4647 | { 7, 8, 16 }, | ||
4648 | { 9, 10, 17 }, | ||
4649 | { 0, }, | ||
4650 | { 0, }, | ||
4651 | { 0, } | ||
4652 | }, | ||
4653 | { | ||
4654 | /* fixed_bit_masks */ | ||
4655 | 0x800000007ffc0000ULL, | ||
4656 | 0xfffe000000000000ULL, | ||
4657 | 0ULL, | ||
4658 | 0ULL, | ||
4659 | 0ULL | ||
4660 | }, | ||
4661 | { | ||
4662 | /* fixed_bit_values */ | ||
4663 | 0x00000000004c0000ULL, | ||
4664 | 0x0824000000000000ULL, | ||
4665 | -1ULL, | ||
4666 | -1ULL, | ||
4667 | -1ULL | ||
4668 | } | ||
4669 | }, | ||
4670 | { "mnzb.sn", TILE_OPC_MNZB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4671 | TREG_SN, /* implicitly_written_register */ | ||
4672 | 1, /* can_bundle */ | ||
4673 | { | ||
4674 | /* operands */ | ||
4675 | { 7, 8, 16 }, | ||
4676 | { 9, 10, 17 }, | ||
4677 | { 0, }, | ||
4678 | { 0, }, | ||
4679 | { 0, } | ||
4680 | }, | ||
4681 | { | ||
4682 | /* fixed_bit_masks */ | ||
4683 | 0x800000007ffc0000ULL, | ||
4684 | 0xfffe000000000000ULL, | ||
4685 | 0ULL, | ||
4686 | 0ULL, | ||
4687 | 0ULL | ||
4688 | }, | ||
4689 | { | ||
4690 | /* fixed_bit_values */ | ||
4691 | 0x00000000084c0000ULL, | ||
4692 | 0x0c24000000000000ULL, | ||
4693 | -1ULL, | ||
4694 | -1ULL, | ||
4695 | -1ULL | ||
4696 | } | ||
4697 | }, | ||
4698 | { "mnzh", TILE_OPC_MNZH, 0x3 /* pipes */, 3 /* num_operands */, | ||
4699 | TREG_ZERO, /* implicitly_written_register */ | ||
4700 | 1, /* can_bundle */ | ||
4701 | { | ||
4702 | /* operands */ | ||
4703 | { 7, 8, 16 }, | ||
4704 | { 9, 10, 17 }, | ||
4705 | { 0, }, | ||
4706 | { 0, }, | ||
4707 | { 0, } | ||
4708 | }, | ||
4709 | { | ||
4710 | /* fixed_bit_masks */ | ||
4711 | 0x800000007ffc0000ULL, | ||
4712 | 0xfffe000000000000ULL, | ||
4713 | 0ULL, | ||
4714 | 0ULL, | ||
4715 | 0ULL | ||
4716 | }, | ||
4717 | { | ||
4718 | /* fixed_bit_values */ | ||
4719 | 0x0000000000500000ULL, | ||
4720 | 0x0826000000000000ULL, | ||
4721 | -1ULL, | ||
4722 | -1ULL, | ||
4723 | -1ULL | ||
4724 | } | ||
4725 | }, | ||
4726 | { "mnzh.sn", TILE_OPC_MNZH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
4727 | TREG_SN, /* implicitly_written_register */ | ||
4728 | 1, /* can_bundle */ | ||
4729 | { | ||
4730 | /* operands */ | ||
4731 | { 7, 8, 16 }, | ||
4732 | { 9, 10, 17 }, | ||
4733 | { 0, }, | ||
4734 | { 0, }, | ||
4735 | { 0, } | ||
4736 | }, | ||
4737 | { | ||
4738 | /* fixed_bit_masks */ | ||
4739 | 0x800000007ffc0000ULL, | ||
4740 | 0xfffe000000000000ULL, | ||
4741 | 0ULL, | ||
4742 | 0ULL, | ||
4743 | 0ULL | ||
4744 | }, | ||
4745 | { | ||
4746 | /* fixed_bit_values */ | ||
4747 | 0x0000000008500000ULL, | ||
4748 | 0x0c26000000000000ULL, | ||
4749 | -1ULL, | ||
4750 | -1ULL, | ||
4751 | -1ULL | ||
4752 | } | ||
4753 | }, | ||
4754 | { "mtspr", TILE_OPC_MTSPR, 0x2 /* pipes */, 2 /* num_operands */, | ||
4755 | TREG_ZERO, /* implicitly_written_register */ | ||
4756 | 1, /* can_bundle */ | ||
4757 | { | ||
4758 | /* operands */ | ||
4759 | { 0, }, | ||
4760 | { 30, 10 }, | ||
4761 | { 0, }, | ||
4762 | { 0, }, | ||
4763 | { 0, } | ||
4764 | }, | ||
4765 | { | ||
4766 | /* fixed_bit_masks */ | ||
4767 | 0ULL, | ||
4768 | 0xfbf8000000000000ULL, | ||
4769 | 0ULL, | ||
4770 | 0ULL, | ||
4771 | 0ULL | ||
4772 | }, | ||
4773 | { | ||
4774 | /* fixed_bit_values */ | ||
4775 | -1ULL, | ||
4776 | 0x3050000000000000ULL, | ||
4777 | -1ULL, | ||
4778 | -1ULL, | ||
4779 | -1ULL | ||
4780 | } | ||
4781 | }, | ||
4782 | { "mulhh_ss", TILE_OPC_MULHH_SS, 0x5 /* pipes */, 3 /* num_operands */, | ||
4783 | TREG_ZERO, /* implicitly_written_register */ | ||
4784 | 1, /* can_bundle */ | ||
4785 | { | ||
4786 | /* operands */ | ||
4787 | { 7, 8, 16 }, | ||
4788 | { 0, }, | ||
4789 | { 11, 12, 18 }, | ||
4790 | { 0, }, | ||
4791 | { 0, } | ||
4792 | }, | ||
4793 | { | ||
4794 | /* fixed_bit_masks */ | ||
4795 | 0x800000007ffc0000ULL, | ||
4796 | 0ULL, | ||
4797 | 0x80000000780c0000ULL, | ||
4798 | 0ULL, | ||
4799 | 0ULL | ||
4800 | }, | ||
4801 | { | ||
4802 | /* fixed_bit_values */ | ||
4803 | 0x0000000000680000ULL, | ||
4804 | -1ULL, | ||
4805 | 0x8000000038000000ULL, | ||
4806 | -1ULL, | ||
4807 | -1ULL | ||
4808 | } | ||
4809 | }, | ||
4810 | { "mulhh_ss.sn", TILE_OPC_MULHH_SS_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
4811 | TREG_SN, /* implicitly_written_register */ | ||
4812 | 1, /* can_bundle */ | ||
4813 | { | ||
4814 | /* operands */ | ||
4815 | { 7, 8, 16 }, | ||
4816 | { 0, }, | ||
4817 | { 0, }, | ||
4818 | { 0, }, | ||
4819 | { 0, } | ||
4820 | }, | ||
4821 | { | ||
4822 | /* fixed_bit_masks */ | ||
4823 | 0x800000007ffc0000ULL, | ||
4824 | 0ULL, | ||
4825 | 0ULL, | ||
4826 | 0ULL, | ||
4827 | 0ULL | ||
4828 | }, | ||
4829 | { | ||
4830 | /* fixed_bit_values */ | ||
4831 | 0x0000000008680000ULL, | ||
4832 | -1ULL, | ||
4833 | -1ULL, | ||
4834 | -1ULL, | ||
4835 | -1ULL | ||
4836 | } | ||
4837 | }, | ||
4838 | { "mulhh_su", TILE_OPC_MULHH_SU, 0x1 /* pipes */, 3 /* num_operands */, | ||
4839 | TREG_ZERO, /* implicitly_written_register */ | ||
4840 | 1, /* can_bundle */ | ||
4841 | { | ||
4842 | /* operands */ | ||
4843 | { 7, 8, 16 }, | ||
4844 | { 0, }, | ||
4845 | { 0, }, | ||
4846 | { 0, }, | ||
4847 | { 0, } | ||
4848 | }, | ||
4849 | { | ||
4850 | /* fixed_bit_masks */ | ||
4851 | 0x800000007ffc0000ULL, | ||
4852 | 0ULL, | ||
4853 | 0ULL, | ||
4854 | 0ULL, | ||
4855 | 0ULL | ||
4856 | }, | ||
4857 | { | ||
4858 | /* fixed_bit_values */ | ||
4859 | 0x00000000006c0000ULL, | ||
4860 | -1ULL, | ||
4861 | -1ULL, | ||
4862 | -1ULL, | ||
4863 | -1ULL | ||
4864 | } | ||
4865 | }, | ||
4866 | { "mulhh_su.sn", TILE_OPC_MULHH_SU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
4867 | TREG_SN, /* implicitly_written_register */ | ||
4868 | 1, /* can_bundle */ | ||
4869 | { | ||
4870 | /* operands */ | ||
4871 | { 7, 8, 16 }, | ||
4872 | { 0, }, | ||
4873 | { 0, }, | ||
4874 | { 0, }, | ||
4875 | { 0, } | ||
4876 | }, | ||
4877 | { | ||
4878 | /* fixed_bit_masks */ | ||
4879 | 0x800000007ffc0000ULL, | ||
4880 | 0ULL, | ||
4881 | 0ULL, | ||
4882 | 0ULL, | ||
4883 | 0ULL | ||
4884 | }, | ||
4885 | { | ||
4886 | /* fixed_bit_values */ | ||
4887 | 0x00000000086c0000ULL, | ||
4888 | -1ULL, | ||
4889 | -1ULL, | ||
4890 | -1ULL, | ||
4891 | -1ULL | ||
4892 | } | ||
4893 | }, | ||
4894 | { "mulhh_uu", TILE_OPC_MULHH_UU, 0x5 /* pipes */, 3 /* num_operands */, | ||
4895 | TREG_ZERO, /* implicitly_written_register */ | ||
4896 | 1, /* can_bundle */ | ||
4897 | { | ||
4898 | /* operands */ | ||
4899 | { 7, 8, 16 }, | ||
4900 | { 0, }, | ||
4901 | { 11, 12, 18 }, | ||
4902 | { 0, }, | ||
4903 | { 0, } | ||
4904 | }, | ||
4905 | { | ||
4906 | /* fixed_bit_masks */ | ||
4907 | 0x800000007ffc0000ULL, | ||
4908 | 0ULL, | ||
4909 | 0x80000000780c0000ULL, | ||
4910 | 0ULL, | ||
4911 | 0ULL | ||
4912 | }, | ||
4913 | { | ||
4914 | /* fixed_bit_values */ | ||
4915 | 0x0000000000700000ULL, | ||
4916 | -1ULL, | ||
4917 | 0x8000000038040000ULL, | ||
4918 | -1ULL, | ||
4919 | -1ULL | ||
4920 | } | ||
4921 | }, | ||
4922 | { "mulhh_uu.sn", TILE_OPC_MULHH_UU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
4923 | TREG_SN, /* implicitly_written_register */ | ||
4924 | 1, /* can_bundle */ | ||
4925 | { | ||
4926 | /* operands */ | ||
4927 | { 7, 8, 16 }, | ||
4928 | { 0, }, | ||
4929 | { 0, }, | ||
4930 | { 0, }, | ||
4931 | { 0, } | ||
4932 | }, | ||
4933 | { | ||
4934 | /* fixed_bit_masks */ | ||
4935 | 0x800000007ffc0000ULL, | ||
4936 | 0ULL, | ||
4937 | 0ULL, | ||
4938 | 0ULL, | ||
4939 | 0ULL | ||
4940 | }, | ||
4941 | { | ||
4942 | /* fixed_bit_values */ | ||
4943 | 0x0000000008700000ULL, | ||
4944 | -1ULL, | ||
4945 | -1ULL, | ||
4946 | -1ULL, | ||
4947 | -1ULL | ||
4948 | } | ||
4949 | }, | ||
4950 | { "mulhha_ss", TILE_OPC_MULHHA_SS, 0x5 /* pipes */, 3 /* num_operands */, | ||
4951 | TREG_ZERO, /* implicitly_written_register */ | ||
4952 | 1, /* can_bundle */ | ||
4953 | { | ||
4954 | /* operands */ | ||
4955 | { 21, 8, 16 }, | ||
4956 | { 0, }, | ||
4957 | { 31, 12, 18 }, | ||
4958 | { 0, }, | ||
4959 | { 0, } | ||
4960 | }, | ||
4961 | { | ||
4962 | /* fixed_bit_masks */ | ||
4963 | 0x800000007ffc0000ULL, | ||
4964 | 0ULL, | ||
4965 | 0x80000000780c0000ULL, | ||
4966 | 0ULL, | ||
4967 | 0ULL | ||
4968 | }, | ||
4969 | { | ||
4970 | /* fixed_bit_values */ | ||
4971 | 0x0000000000580000ULL, | ||
4972 | -1ULL, | ||
4973 | 0x8000000040000000ULL, | ||
4974 | -1ULL, | ||
4975 | -1ULL | ||
4976 | } | ||
4977 | }, | ||
4978 | { "mulhha_ss.sn", TILE_OPC_MULHHA_SS_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
4979 | TREG_SN, /* implicitly_written_register */ | ||
4980 | 1, /* can_bundle */ | ||
4981 | { | ||
4982 | /* operands */ | ||
4983 | { 21, 8, 16 }, | ||
4984 | { 0, }, | ||
4985 | { 0, }, | ||
4986 | { 0, }, | ||
4987 | { 0, } | ||
4988 | }, | ||
4989 | { | ||
4990 | /* fixed_bit_masks */ | ||
4991 | 0x800000007ffc0000ULL, | ||
4992 | 0ULL, | ||
4993 | 0ULL, | ||
4994 | 0ULL, | ||
4995 | 0ULL | ||
4996 | }, | ||
4997 | { | ||
4998 | /* fixed_bit_values */ | ||
4999 | 0x0000000008580000ULL, | ||
5000 | -1ULL, | ||
5001 | -1ULL, | ||
5002 | -1ULL, | ||
5003 | -1ULL | ||
5004 | } | ||
5005 | }, | ||
5006 | { "mulhha_su", TILE_OPC_MULHHA_SU, 0x1 /* pipes */, 3 /* num_operands */, | ||
5007 | TREG_ZERO, /* implicitly_written_register */ | ||
5008 | 1, /* can_bundle */ | ||
5009 | { | ||
5010 | /* operands */ | ||
5011 | { 21, 8, 16 }, | ||
5012 | { 0, }, | ||
5013 | { 0, }, | ||
5014 | { 0, }, | ||
5015 | { 0, } | ||
5016 | }, | ||
5017 | { | ||
5018 | /* fixed_bit_masks */ | ||
5019 | 0x800000007ffc0000ULL, | ||
5020 | 0ULL, | ||
5021 | 0ULL, | ||
5022 | 0ULL, | ||
5023 | 0ULL | ||
5024 | }, | ||
5025 | { | ||
5026 | /* fixed_bit_values */ | ||
5027 | 0x00000000005c0000ULL, | ||
5028 | -1ULL, | ||
5029 | -1ULL, | ||
5030 | -1ULL, | ||
5031 | -1ULL | ||
5032 | } | ||
5033 | }, | ||
5034 | { "mulhha_su.sn", TILE_OPC_MULHHA_SU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5035 | TREG_SN, /* implicitly_written_register */ | ||
5036 | 1, /* can_bundle */ | ||
5037 | { | ||
5038 | /* operands */ | ||
5039 | { 21, 8, 16 }, | ||
5040 | { 0, }, | ||
5041 | { 0, }, | ||
5042 | { 0, }, | ||
5043 | { 0, } | ||
5044 | }, | ||
5045 | { | ||
5046 | /* fixed_bit_masks */ | ||
5047 | 0x800000007ffc0000ULL, | ||
5048 | 0ULL, | ||
5049 | 0ULL, | ||
5050 | 0ULL, | ||
5051 | 0ULL | ||
5052 | }, | ||
5053 | { | ||
5054 | /* fixed_bit_values */ | ||
5055 | 0x00000000085c0000ULL, | ||
5056 | -1ULL, | ||
5057 | -1ULL, | ||
5058 | -1ULL, | ||
5059 | -1ULL | ||
5060 | } | ||
5061 | }, | ||
5062 | { "mulhha_uu", TILE_OPC_MULHHA_UU, 0x5 /* pipes */, 3 /* num_operands */, | ||
5063 | TREG_ZERO, /* implicitly_written_register */ | ||
5064 | 1, /* can_bundle */ | ||
5065 | { | ||
5066 | /* operands */ | ||
5067 | { 21, 8, 16 }, | ||
5068 | { 0, }, | ||
5069 | { 31, 12, 18 }, | ||
5070 | { 0, }, | ||
5071 | { 0, } | ||
5072 | }, | ||
5073 | { | ||
5074 | /* fixed_bit_masks */ | ||
5075 | 0x800000007ffc0000ULL, | ||
5076 | 0ULL, | ||
5077 | 0x80000000780c0000ULL, | ||
5078 | 0ULL, | ||
5079 | 0ULL | ||
5080 | }, | ||
5081 | { | ||
5082 | /* fixed_bit_values */ | ||
5083 | 0x0000000000600000ULL, | ||
5084 | -1ULL, | ||
5085 | 0x8000000040040000ULL, | ||
5086 | -1ULL, | ||
5087 | -1ULL | ||
5088 | } | ||
5089 | }, | ||
5090 | { "mulhha_uu.sn", TILE_OPC_MULHHA_UU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5091 | TREG_SN, /* implicitly_written_register */ | ||
5092 | 1, /* can_bundle */ | ||
5093 | { | ||
5094 | /* operands */ | ||
5095 | { 21, 8, 16 }, | ||
5096 | { 0, }, | ||
5097 | { 0, }, | ||
5098 | { 0, }, | ||
5099 | { 0, } | ||
5100 | }, | ||
5101 | { | ||
5102 | /* fixed_bit_masks */ | ||
5103 | 0x800000007ffc0000ULL, | ||
5104 | 0ULL, | ||
5105 | 0ULL, | ||
5106 | 0ULL, | ||
5107 | 0ULL | ||
5108 | }, | ||
5109 | { | ||
5110 | /* fixed_bit_values */ | ||
5111 | 0x0000000008600000ULL, | ||
5112 | -1ULL, | ||
5113 | -1ULL, | ||
5114 | -1ULL, | ||
5115 | -1ULL | ||
5116 | } | ||
5117 | }, | ||
5118 | { "mulhhsa_uu", TILE_OPC_MULHHSA_UU, 0x1 /* pipes */, 3 /* num_operands */, | ||
5119 | TREG_ZERO, /* implicitly_written_register */ | ||
5120 | 1, /* can_bundle */ | ||
5121 | { | ||
5122 | /* operands */ | ||
5123 | { 21, 8, 16 }, | ||
5124 | { 0, }, | ||
5125 | { 0, }, | ||
5126 | { 0, }, | ||
5127 | { 0, } | ||
5128 | }, | ||
5129 | { | ||
5130 | /* fixed_bit_masks */ | ||
5131 | 0x800000007ffc0000ULL, | ||
5132 | 0ULL, | ||
5133 | 0ULL, | ||
5134 | 0ULL, | ||
5135 | 0ULL | ||
5136 | }, | ||
5137 | { | ||
5138 | /* fixed_bit_values */ | ||
5139 | 0x0000000000640000ULL, | ||
5140 | -1ULL, | ||
5141 | -1ULL, | ||
5142 | -1ULL, | ||
5143 | -1ULL | ||
5144 | } | ||
5145 | }, | ||
5146 | { "mulhhsa_uu.sn", TILE_OPC_MULHHSA_UU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5147 | TREG_SN, /* implicitly_written_register */ | ||
5148 | 1, /* can_bundle */ | ||
5149 | { | ||
5150 | /* operands */ | ||
5151 | { 21, 8, 16 }, | ||
5152 | { 0, }, | ||
5153 | { 0, }, | ||
5154 | { 0, }, | ||
5155 | { 0, } | ||
5156 | }, | ||
5157 | { | ||
5158 | /* fixed_bit_masks */ | ||
5159 | 0x800000007ffc0000ULL, | ||
5160 | 0ULL, | ||
5161 | 0ULL, | ||
5162 | 0ULL, | ||
5163 | 0ULL | ||
5164 | }, | ||
5165 | { | ||
5166 | /* fixed_bit_values */ | ||
5167 | 0x0000000008640000ULL, | ||
5168 | -1ULL, | ||
5169 | -1ULL, | ||
5170 | -1ULL, | ||
5171 | -1ULL | ||
5172 | } | ||
5173 | }, | ||
5174 | { "mulhl_ss", TILE_OPC_MULHL_SS, 0x1 /* pipes */, 3 /* num_operands */, | ||
5175 | TREG_ZERO, /* implicitly_written_register */ | ||
5176 | 1, /* can_bundle */ | ||
5177 | { | ||
5178 | /* operands */ | ||
5179 | { 7, 8, 16 }, | ||
5180 | { 0, }, | ||
5181 | { 0, }, | ||
5182 | { 0, }, | ||
5183 | { 0, } | ||
5184 | }, | ||
5185 | { | ||
5186 | /* fixed_bit_masks */ | ||
5187 | 0x800000007ffc0000ULL, | ||
5188 | 0ULL, | ||
5189 | 0ULL, | ||
5190 | 0ULL, | ||
5191 | 0ULL | ||
5192 | }, | ||
5193 | { | ||
5194 | /* fixed_bit_values */ | ||
5195 | 0x0000000000880000ULL, | ||
5196 | -1ULL, | ||
5197 | -1ULL, | ||
5198 | -1ULL, | ||
5199 | -1ULL | ||
5200 | } | ||
5201 | }, | ||
5202 | { "mulhl_ss.sn", TILE_OPC_MULHL_SS_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5203 | TREG_SN, /* implicitly_written_register */ | ||
5204 | 1, /* can_bundle */ | ||
5205 | { | ||
5206 | /* operands */ | ||
5207 | { 7, 8, 16 }, | ||
5208 | { 0, }, | ||
5209 | { 0, }, | ||
5210 | { 0, }, | ||
5211 | { 0, } | ||
5212 | }, | ||
5213 | { | ||
5214 | /* fixed_bit_masks */ | ||
5215 | 0x800000007ffc0000ULL, | ||
5216 | 0ULL, | ||
5217 | 0ULL, | ||
5218 | 0ULL, | ||
5219 | 0ULL | ||
5220 | }, | ||
5221 | { | ||
5222 | /* fixed_bit_values */ | ||
5223 | 0x0000000008880000ULL, | ||
5224 | -1ULL, | ||
5225 | -1ULL, | ||
5226 | -1ULL, | ||
5227 | -1ULL | ||
5228 | } | ||
5229 | }, | ||
5230 | { "mulhl_su", TILE_OPC_MULHL_SU, 0x1 /* pipes */, 3 /* num_operands */, | ||
5231 | TREG_ZERO, /* implicitly_written_register */ | ||
5232 | 1, /* can_bundle */ | ||
5233 | { | ||
5234 | /* operands */ | ||
5235 | { 7, 8, 16 }, | ||
5236 | { 0, }, | ||
5237 | { 0, }, | ||
5238 | { 0, }, | ||
5239 | { 0, } | ||
5240 | }, | ||
5241 | { | ||
5242 | /* fixed_bit_masks */ | ||
5243 | 0x800000007ffc0000ULL, | ||
5244 | 0ULL, | ||
5245 | 0ULL, | ||
5246 | 0ULL, | ||
5247 | 0ULL | ||
5248 | }, | ||
5249 | { | ||
5250 | /* fixed_bit_values */ | ||
5251 | 0x00000000008c0000ULL, | ||
5252 | -1ULL, | ||
5253 | -1ULL, | ||
5254 | -1ULL, | ||
5255 | -1ULL | ||
5256 | } | ||
5257 | }, | ||
5258 | { "mulhl_su.sn", TILE_OPC_MULHL_SU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5259 | TREG_SN, /* implicitly_written_register */ | ||
5260 | 1, /* can_bundle */ | ||
5261 | { | ||
5262 | /* operands */ | ||
5263 | { 7, 8, 16 }, | ||
5264 | { 0, }, | ||
5265 | { 0, }, | ||
5266 | { 0, }, | ||
5267 | { 0, } | ||
5268 | }, | ||
5269 | { | ||
5270 | /* fixed_bit_masks */ | ||
5271 | 0x800000007ffc0000ULL, | ||
5272 | 0ULL, | ||
5273 | 0ULL, | ||
5274 | 0ULL, | ||
5275 | 0ULL | ||
5276 | }, | ||
5277 | { | ||
5278 | /* fixed_bit_values */ | ||
5279 | 0x00000000088c0000ULL, | ||
5280 | -1ULL, | ||
5281 | -1ULL, | ||
5282 | -1ULL, | ||
5283 | -1ULL | ||
5284 | } | ||
5285 | }, | ||
5286 | { "mulhl_us", TILE_OPC_MULHL_US, 0x1 /* pipes */, 3 /* num_operands */, | ||
5287 | TREG_ZERO, /* implicitly_written_register */ | ||
5288 | 1, /* can_bundle */ | ||
5289 | { | ||
5290 | /* operands */ | ||
5291 | { 7, 8, 16 }, | ||
5292 | { 0, }, | ||
5293 | { 0, }, | ||
5294 | { 0, }, | ||
5295 | { 0, } | ||
5296 | }, | ||
5297 | { | ||
5298 | /* fixed_bit_masks */ | ||
5299 | 0x800000007ffc0000ULL, | ||
5300 | 0ULL, | ||
5301 | 0ULL, | ||
5302 | 0ULL, | ||
5303 | 0ULL | ||
5304 | }, | ||
5305 | { | ||
5306 | /* fixed_bit_values */ | ||
5307 | 0x0000000000900000ULL, | ||
5308 | -1ULL, | ||
5309 | -1ULL, | ||
5310 | -1ULL, | ||
5311 | -1ULL | ||
5312 | } | ||
5313 | }, | ||
5314 | { "mulhl_us.sn", TILE_OPC_MULHL_US_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5315 | TREG_SN, /* implicitly_written_register */ | ||
5316 | 1, /* can_bundle */ | ||
5317 | { | ||
5318 | /* operands */ | ||
5319 | { 7, 8, 16 }, | ||
5320 | { 0, }, | ||
5321 | { 0, }, | ||
5322 | { 0, }, | ||
5323 | { 0, } | ||
5324 | }, | ||
5325 | { | ||
5326 | /* fixed_bit_masks */ | ||
5327 | 0x800000007ffc0000ULL, | ||
5328 | 0ULL, | ||
5329 | 0ULL, | ||
5330 | 0ULL, | ||
5331 | 0ULL | ||
5332 | }, | ||
5333 | { | ||
5334 | /* fixed_bit_values */ | ||
5335 | 0x0000000008900000ULL, | ||
5336 | -1ULL, | ||
5337 | -1ULL, | ||
5338 | -1ULL, | ||
5339 | -1ULL | ||
5340 | } | ||
5341 | }, | ||
5342 | { "mulhl_uu", TILE_OPC_MULHL_UU, 0x1 /* pipes */, 3 /* num_operands */, | ||
5343 | TREG_ZERO, /* implicitly_written_register */ | ||
5344 | 1, /* can_bundle */ | ||
5345 | { | ||
5346 | /* operands */ | ||
5347 | { 7, 8, 16 }, | ||
5348 | { 0, }, | ||
5349 | { 0, }, | ||
5350 | { 0, }, | ||
5351 | { 0, } | ||
5352 | }, | ||
5353 | { | ||
5354 | /* fixed_bit_masks */ | ||
5355 | 0x800000007ffc0000ULL, | ||
5356 | 0ULL, | ||
5357 | 0ULL, | ||
5358 | 0ULL, | ||
5359 | 0ULL | ||
5360 | }, | ||
5361 | { | ||
5362 | /* fixed_bit_values */ | ||
5363 | 0x0000000000940000ULL, | ||
5364 | -1ULL, | ||
5365 | -1ULL, | ||
5366 | -1ULL, | ||
5367 | -1ULL | ||
5368 | } | ||
5369 | }, | ||
5370 | { "mulhl_uu.sn", TILE_OPC_MULHL_UU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5371 | TREG_SN, /* implicitly_written_register */ | ||
5372 | 1, /* can_bundle */ | ||
5373 | { | ||
5374 | /* operands */ | ||
5375 | { 7, 8, 16 }, | ||
5376 | { 0, }, | ||
5377 | { 0, }, | ||
5378 | { 0, }, | ||
5379 | { 0, } | ||
5380 | }, | ||
5381 | { | ||
5382 | /* fixed_bit_masks */ | ||
5383 | 0x800000007ffc0000ULL, | ||
5384 | 0ULL, | ||
5385 | 0ULL, | ||
5386 | 0ULL, | ||
5387 | 0ULL | ||
5388 | }, | ||
5389 | { | ||
5390 | /* fixed_bit_values */ | ||
5391 | 0x0000000008940000ULL, | ||
5392 | -1ULL, | ||
5393 | -1ULL, | ||
5394 | -1ULL, | ||
5395 | -1ULL | ||
5396 | } | ||
5397 | }, | ||
5398 | { "mulhla_ss", TILE_OPC_MULHLA_SS, 0x1 /* pipes */, 3 /* num_operands */, | ||
5399 | TREG_ZERO, /* implicitly_written_register */ | ||
5400 | 1, /* can_bundle */ | ||
5401 | { | ||
5402 | /* operands */ | ||
5403 | { 21, 8, 16 }, | ||
5404 | { 0, }, | ||
5405 | { 0, }, | ||
5406 | { 0, }, | ||
5407 | { 0, } | ||
5408 | }, | ||
5409 | { | ||
5410 | /* fixed_bit_masks */ | ||
5411 | 0x800000007ffc0000ULL, | ||
5412 | 0ULL, | ||
5413 | 0ULL, | ||
5414 | 0ULL, | ||
5415 | 0ULL | ||
5416 | }, | ||
5417 | { | ||
5418 | /* fixed_bit_values */ | ||
5419 | 0x0000000000740000ULL, | ||
5420 | -1ULL, | ||
5421 | -1ULL, | ||
5422 | -1ULL, | ||
5423 | -1ULL | ||
5424 | } | ||
5425 | }, | ||
5426 | { "mulhla_ss.sn", TILE_OPC_MULHLA_SS_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5427 | TREG_SN, /* implicitly_written_register */ | ||
5428 | 1, /* can_bundle */ | ||
5429 | { | ||
5430 | /* operands */ | ||
5431 | { 21, 8, 16 }, | ||
5432 | { 0, }, | ||
5433 | { 0, }, | ||
5434 | { 0, }, | ||
5435 | { 0, } | ||
5436 | }, | ||
5437 | { | ||
5438 | /* fixed_bit_masks */ | ||
5439 | 0x800000007ffc0000ULL, | ||
5440 | 0ULL, | ||
5441 | 0ULL, | ||
5442 | 0ULL, | ||
5443 | 0ULL | ||
5444 | }, | ||
5445 | { | ||
5446 | /* fixed_bit_values */ | ||
5447 | 0x0000000008740000ULL, | ||
5448 | -1ULL, | ||
5449 | -1ULL, | ||
5450 | -1ULL, | ||
5451 | -1ULL | ||
5452 | } | ||
5453 | }, | ||
5454 | { "mulhla_su", TILE_OPC_MULHLA_SU, 0x1 /* pipes */, 3 /* num_operands */, | ||
5455 | TREG_ZERO, /* implicitly_written_register */ | ||
5456 | 1, /* can_bundle */ | ||
5457 | { | ||
5458 | /* operands */ | ||
5459 | { 21, 8, 16 }, | ||
5460 | { 0, }, | ||
5461 | { 0, }, | ||
5462 | { 0, }, | ||
5463 | { 0, } | ||
5464 | }, | ||
5465 | { | ||
5466 | /* fixed_bit_masks */ | ||
5467 | 0x800000007ffc0000ULL, | ||
5468 | 0ULL, | ||
5469 | 0ULL, | ||
5470 | 0ULL, | ||
5471 | 0ULL | ||
5472 | }, | ||
5473 | { | ||
5474 | /* fixed_bit_values */ | ||
5475 | 0x0000000000780000ULL, | ||
5476 | -1ULL, | ||
5477 | -1ULL, | ||
5478 | -1ULL, | ||
5479 | -1ULL | ||
5480 | } | ||
5481 | }, | ||
5482 | { "mulhla_su.sn", TILE_OPC_MULHLA_SU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5483 | TREG_SN, /* implicitly_written_register */ | ||
5484 | 1, /* can_bundle */ | ||
5485 | { | ||
5486 | /* operands */ | ||
5487 | { 21, 8, 16 }, | ||
5488 | { 0, }, | ||
5489 | { 0, }, | ||
5490 | { 0, }, | ||
5491 | { 0, } | ||
5492 | }, | ||
5493 | { | ||
5494 | /* fixed_bit_masks */ | ||
5495 | 0x800000007ffc0000ULL, | ||
5496 | 0ULL, | ||
5497 | 0ULL, | ||
5498 | 0ULL, | ||
5499 | 0ULL | ||
5500 | }, | ||
5501 | { | ||
5502 | /* fixed_bit_values */ | ||
5503 | 0x0000000008780000ULL, | ||
5504 | -1ULL, | ||
5505 | -1ULL, | ||
5506 | -1ULL, | ||
5507 | -1ULL | ||
5508 | } | ||
5509 | }, | ||
5510 | { "mulhla_us", TILE_OPC_MULHLA_US, 0x1 /* pipes */, 3 /* num_operands */, | ||
5511 | TREG_ZERO, /* implicitly_written_register */ | ||
5512 | 1, /* can_bundle */ | ||
5513 | { | ||
5514 | /* operands */ | ||
5515 | { 21, 8, 16 }, | ||
5516 | { 0, }, | ||
5517 | { 0, }, | ||
5518 | { 0, }, | ||
5519 | { 0, } | ||
5520 | }, | ||
5521 | { | ||
5522 | /* fixed_bit_masks */ | ||
5523 | 0x800000007ffc0000ULL, | ||
5524 | 0ULL, | ||
5525 | 0ULL, | ||
5526 | 0ULL, | ||
5527 | 0ULL | ||
5528 | }, | ||
5529 | { | ||
5530 | /* fixed_bit_values */ | ||
5531 | 0x00000000007c0000ULL, | ||
5532 | -1ULL, | ||
5533 | -1ULL, | ||
5534 | -1ULL, | ||
5535 | -1ULL | ||
5536 | } | ||
5537 | }, | ||
5538 | { "mulhla_us.sn", TILE_OPC_MULHLA_US_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5539 | TREG_SN, /* implicitly_written_register */ | ||
5540 | 1, /* can_bundle */ | ||
5541 | { | ||
5542 | /* operands */ | ||
5543 | { 21, 8, 16 }, | ||
5544 | { 0, }, | ||
5545 | { 0, }, | ||
5546 | { 0, }, | ||
5547 | { 0, } | ||
5548 | }, | ||
5549 | { | ||
5550 | /* fixed_bit_masks */ | ||
5551 | 0x800000007ffc0000ULL, | ||
5552 | 0ULL, | ||
5553 | 0ULL, | ||
5554 | 0ULL, | ||
5555 | 0ULL | ||
5556 | }, | ||
5557 | { | ||
5558 | /* fixed_bit_values */ | ||
5559 | 0x00000000087c0000ULL, | ||
5560 | -1ULL, | ||
5561 | -1ULL, | ||
5562 | -1ULL, | ||
5563 | -1ULL | ||
5564 | } | ||
5565 | }, | ||
5566 | { "mulhla_uu", TILE_OPC_MULHLA_UU, 0x1 /* pipes */, 3 /* num_operands */, | ||
5567 | TREG_ZERO, /* implicitly_written_register */ | ||
5568 | 1, /* can_bundle */ | ||
5569 | { | ||
5570 | /* operands */ | ||
5571 | { 21, 8, 16 }, | ||
5572 | { 0, }, | ||
5573 | { 0, }, | ||
5574 | { 0, }, | ||
5575 | { 0, } | ||
5576 | }, | ||
5577 | { | ||
5578 | /* fixed_bit_masks */ | ||
5579 | 0x800000007ffc0000ULL, | ||
5580 | 0ULL, | ||
5581 | 0ULL, | ||
5582 | 0ULL, | ||
5583 | 0ULL | ||
5584 | }, | ||
5585 | { | ||
5586 | /* fixed_bit_values */ | ||
5587 | 0x0000000000800000ULL, | ||
5588 | -1ULL, | ||
5589 | -1ULL, | ||
5590 | -1ULL, | ||
5591 | -1ULL | ||
5592 | } | ||
5593 | }, | ||
5594 | { "mulhla_uu.sn", TILE_OPC_MULHLA_UU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5595 | TREG_SN, /* implicitly_written_register */ | ||
5596 | 1, /* can_bundle */ | ||
5597 | { | ||
5598 | /* operands */ | ||
5599 | { 21, 8, 16 }, | ||
5600 | { 0, }, | ||
5601 | { 0, }, | ||
5602 | { 0, }, | ||
5603 | { 0, } | ||
5604 | }, | ||
5605 | { | ||
5606 | /* fixed_bit_masks */ | ||
5607 | 0x800000007ffc0000ULL, | ||
5608 | 0ULL, | ||
5609 | 0ULL, | ||
5610 | 0ULL, | ||
5611 | 0ULL | ||
5612 | }, | ||
5613 | { | ||
5614 | /* fixed_bit_values */ | ||
5615 | 0x0000000008800000ULL, | ||
5616 | -1ULL, | ||
5617 | -1ULL, | ||
5618 | -1ULL, | ||
5619 | -1ULL | ||
5620 | } | ||
5621 | }, | ||
5622 | { "mulhlsa_uu", TILE_OPC_MULHLSA_UU, 0x5 /* pipes */, 3 /* num_operands */, | ||
5623 | TREG_ZERO, /* implicitly_written_register */ | ||
5624 | 1, /* can_bundle */ | ||
5625 | { | ||
5626 | /* operands */ | ||
5627 | { 21, 8, 16 }, | ||
5628 | { 0, }, | ||
5629 | { 31, 12, 18 }, | ||
5630 | { 0, }, | ||
5631 | { 0, } | ||
5632 | }, | ||
5633 | { | ||
5634 | /* fixed_bit_masks */ | ||
5635 | 0x800000007ffc0000ULL, | ||
5636 | 0ULL, | ||
5637 | 0x80000000780c0000ULL, | ||
5638 | 0ULL, | ||
5639 | 0ULL | ||
5640 | }, | ||
5641 | { | ||
5642 | /* fixed_bit_values */ | ||
5643 | 0x0000000000840000ULL, | ||
5644 | -1ULL, | ||
5645 | 0x8000000030000000ULL, | ||
5646 | -1ULL, | ||
5647 | -1ULL | ||
5648 | } | ||
5649 | }, | ||
5650 | { "mulhlsa_uu.sn", TILE_OPC_MULHLSA_UU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5651 | TREG_SN, /* implicitly_written_register */ | ||
5652 | 1, /* can_bundle */ | ||
5653 | { | ||
5654 | /* operands */ | ||
5655 | { 21, 8, 16 }, | ||
5656 | { 0, }, | ||
5657 | { 0, }, | ||
5658 | { 0, }, | ||
5659 | { 0, } | ||
5660 | }, | ||
5661 | { | ||
5662 | /* fixed_bit_masks */ | ||
5663 | 0x800000007ffc0000ULL, | ||
5664 | 0ULL, | ||
5665 | 0ULL, | ||
5666 | 0ULL, | ||
5667 | 0ULL | ||
5668 | }, | ||
5669 | { | ||
5670 | /* fixed_bit_values */ | ||
5671 | 0x0000000008840000ULL, | ||
5672 | -1ULL, | ||
5673 | -1ULL, | ||
5674 | -1ULL, | ||
5675 | -1ULL | ||
5676 | } | ||
5677 | }, | ||
5678 | { "mulll_ss", TILE_OPC_MULLL_SS, 0x5 /* pipes */, 3 /* num_operands */, | ||
5679 | TREG_ZERO, /* implicitly_written_register */ | ||
5680 | 1, /* can_bundle */ | ||
5681 | { | ||
5682 | /* operands */ | ||
5683 | { 7, 8, 16 }, | ||
5684 | { 0, }, | ||
5685 | { 11, 12, 18 }, | ||
5686 | { 0, }, | ||
5687 | { 0, } | ||
5688 | }, | ||
5689 | { | ||
5690 | /* fixed_bit_masks */ | ||
5691 | 0x800000007ffc0000ULL, | ||
5692 | 0ULL, | ||
5693 | 0x80000000780c0000ULL, | ||
5694 | 0ULL, | ||
5695 | 0ULL | ||
5696 | }, | ||
5697 | { | ||
5698 | /* fixed_bit_values */ | ||
5699 | 0x0000000000a80000ULL, | ||
5700 | -1ULL, | ||
5701 | 0x8000000038080000ULL, | ||
5702 | -1ULL, | ||
5703 | -1ULL | ||
5704 | } | ||
5705 | }, | ||
5706 | { "mulll_ss.sn", TILE_OPC_MULLL_SS_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5707 | TREG_SN, /* implicitly_written_register */ | ||
5708 | 1, /* can_bundle */ | ||
5709 | { | ||
5710 | /* operands */ | ||
5711 | { 7, 8, 16 }, | ||
5712 | { 0, }, | ||
5713 | { 0, }, | ||
5714 | { 0, }, | ||
5715 | { 0, } | ||
5716 | }, | ||
5717 | { | ||
5718 | /* fixed_bit_masks */ | ||
5719 | 0x800000007ffc0000ULL, | ||
5720 | 0ULL, | ||
5721 | 0ULL, | ||
5722 | 0ULL, | ||
5723 | 0ULL | ||
5724 | }, | ||
5725 | { | ||
5726 | /* fixed_bit_values */ | ||
5727 | 0x0000000008a80000ULL, | ||
5728 | -1ULL, | ||
5729 | -1ULL, | ||
5730 | -1ULL, | ||
5731 | -1ULL | ||
5732 | } | ||
5733 | }, | ||
5734 | { "mulll_su", TILE_OPC_MULLL_SU, 0x1 /* pipes */, 3 /* num_operands */, | ||
5735 | TREG_ZERO, /* implicitly_written_register */ | ||
5736 | 1, /* can_bundle */ | ||
5737 | { | ||
5738 | /* operands */ | ||
5739 | { 7, 8, 16 }, | ||
5740 | { 0, }, | ||
5741 | { 0, }, | ||
5742 | { 0, }, | ||
5743 | { 0, } | ||
5744 | }, | ||
5745 | { | ||
5746 | /* fixed_bit_masks */ | ||
5747 | 0x800000007ffc0000ULL, | ||
5748 | 0ULL, | ||
5749 | 0ULL, | ||
5750 | 0ULL, | ||
5751 | 0ULL | ||
5752 | }, | ||
5753 | { | ||
5754 | /* fixed_bit_values */ | ||
5755 | 0x0000000000ac0000ULL, | ||
5756 | -1ULL, | ||
5757 | -1ULL, | ||
5758 | -1ULL, | ||
5759 | -1ULL | ||
5760 | } | ||
5761 | }, | ||
5762 | { "mulll_su.sn", TILE_OPC_MULLL_SU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5763 | TREG_SN, /* implicitly_written_register */ | ||
5764 | 1, /* can_bundle */ | ||
5765 | { | ||
5766 | /* operands */ | ||
5767 | { 7, 8, 16 }, | ||
5768 | { 0, }, | ||
5769 | { 0, }, | ||
5770 | { 0, }, | ||
5771 | { 0, } | ||
5772 | }, | ||
5773 | { | ||
5774 | /* fixed_bit_masks */ | ||
5775 | 0x800000007ffc0000ULL, | ||
5776 | 0ULL, | ||
5777 | 0ULL, | ||
5778 | 0ULL, | ||
5779 | 0ULL | ||
5780 | }, | ||
5781 | { | ||
5782 | /* fixed_bit_values */ | ||
5783 | 0x0000000008ac0000ULL, | ||
5784 | -1ULL, | ||
5785 | -1ULL, | ||
5786 | -1ULL, | ||
5787 | -1ULL | ||
5788 | } | ||
5789 | }, | ||
5790 | { "mulll_uu", TILE_OPC_MULLL_UU, 0x5 /* pipes */, 3 /* num_operands */, | ||
5791 | TREG_ZERO, /* implicitly_written_register */ | ||
5792 | 1, /* can_bundle */ | ||
5793 | { | ||
5794 | /* operands */ | ||
5795 | { 7, 8, 16 }, | ||
5796 | { 0, }, | ||
5797 | { 11, 12, 18 }, | ||
5798 | { 0, }, | ||
5799 | { 0, } | ||
5800 | }, | ||
5801 | { | ||
5802 | /* fixed_bit_masks */ | ||
5803 | 0x800000007ffc0000ULL, | ||
5804 | 0ULL, | ||
5805 | 0x80000000780c0000ULL, | ||
5806 | 0ULL, | ||
5807 | 0ULL | ||
5808 | }, | ||
5809 | { | ||
5810 | /* fixed_bit_values */ | ||
5811 | 0x0000000000b00000ULL, | ||
5812 | -1ULL, | ||
5813 | 0x80000000380c0000ULL, | ||
5814 | -1ULL, | ||
5815 | -1ULL | ||
5816 | } | ||
5817 | }, | ||
5818 | { "mulll_uu.sn", TILE_OPC_MULLL_UU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5819 | TREG_SN, /* implicitly_written_register */ | ||
5820 | 1, /* can_bundle */ | ||
5821 | { | ||
5822 | /* operands */ | ||
5823 | { 7, 8, 16 }, | ||
5824 | { 0, }, | ||
5825 | { 0, }, | ||
5826 | { 0, }, | ||
5827 | { 0, } | ||
5828 | }, | ||
5829 | { | ||
5830 | /* fixed_bit_masks */ | ||
5831 | 0x800000007ffc0000ULL, | ||
5832 | 0ULL, | ||
5833 | 0ULL, | ||
5834 | 0ULL, | ||
5835 | 0ULL | ||
5836 | }, | ||
5837 | { | ||
5838 | /* fixed_bit_values */ | ||
5839 | 0x0000000008b00000ULL, | ||
5840 | -1ULL, | ||
5841 | -1ULL, | ||
5842 | -1ULL, | ||
5843 | -1ULL | ||
5844 | } | ||
5845 | }, | ||
5846 | { "mullla_ss", TILE_OPC_MULLLA_SS, 0x5 /* pipes */, 3 /* num_operands */, | ||
5847 | TREG_ZERO, /* implicitly_written_register */ | ||
5848 | 1, /* can_bundle */ | ||
5849 | { | ||
5850 | /* operands */ | ||
5851 | { 21, 8, 16 }, | ||
5852 | { 0, }, | ||
5853 | { 31, 12, 18 }, | ||
5854 | { 0, }, | ||
5855 | { 0, } | ||
5856 | }, | ||
5857 | { | ||
5858 | /* fixed_bit_masks */ | ||
5859 | 0x800000007ffc0000ULL, | ||
5860 | 0ULL, | ||
5861 | 0x80000000780c0000ULL, | ||
5862 | 0ULL, | ||
5863 | 0ULL | ||
5864 | }, | ||
5865 | { | ||
5866 | /* fixed_bit_values */ | ||
5867 | 0x0000000000980000ULL, | ||
5868 | -1ULL, | ||
5869 | 0x8000000040080000ULL, | ||
5870 | -1ULL, | ||
5871 | -1ULL | ||
5872 | } | ||
5873 | }, | ||
5874 | { "mullla_ss.sn", TILE_OPC_MULLLA_SS_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5875 | TREG_SN, /* implicitly_written_register */ | ||
5876 | 1, /* can_bundle */ | ||
5877 | { | ||
5878 | /* operands */ | ||
5879 | { 21, 8, 16 }, | ||
5880 | { 0, }, | ||
5881 | { 0, }, | ||
5882 | { 0, }, | ||
5883 | { 0, } | ||
5884 | }, | ||
5885 | { | ||
5886 | /* fixed_bit_masks */ | ||
5887 | 0x800000007ffc0000ULL, | ||
5888 | 0ULL, | ||
5889 | 0ULL, | ||
5890 | 0ULL, | ||
5891 | 0ULL | ||
5892 | }, | ||
5893 | { | ||
5894 | /* fixed_bit_values */ | ||
5895 | 0x0000000008980000ULL, | ||
5896 | -1ULL, | ||
5897 | -1ULL, | ||
5898 | -1ULL, | ||
5899 | -1ULL | ||
5900 | } | ||
5901 | }, | ||
5902 | { "mullla_su", TILE_OPC_MULLLA_SU, 0x1 /* pipes */, 3 /* num_operands */, | ||
5903 | TREG_ZERO, /* implicitly_written_register */ | ||
5904 | 1, /* can_bundle */ | ||
5905 | { | ||
5906 | /* operands */ | ||
5907 | { 21, 8, 16 }, | ||
5908 | { 0, }, | ||
5909 | { 0, }, | ||
5910 | { 0, }, | ||
5911 | { 0, } | ||
5912 | }, | ||
5913 | { | ||
5914 | /* fixed_bit_masks */ | ||
5915 | 0x800000007ffc0000ULL, | ||
5916 | 0ULL, | ||
5917 | 0ULL, | ||
5918 | 0ULL, | ||
5919 | 0ULL | ||
5920 | }, | ||
5921 | { | ||
5922 | /* fixed_bit_values */ | ||
5923 | 0x00000000009c0000ULL, | ||
5924 | -1ULL, | ||
5925 | -1ULL, | ||
5926 | -1ULL, | ||
5927 | -1ULL | ||
5928 | } | ||
5929 | }, | ||
5930 | { "mullla_su.sn", TILE_OPC_MULLLA_SU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5931 | TREG_SN, /* implicitly_written_register */ | ||
5932 | 1, /* can_bundle */ | ||
5933 | { | ||
5934 | /* operands */ | ||
5935 | { 21, 8, 16 }, | ||
5936 | { 0, }, | ||
5937 | { 0, }, | ||
5938 | { 0, }, | ||
5939 | { 0, } | ||
5940 | }, | ||
5941 | { | ||
5942 | /* fixed_bit_masks */ | ||
5943 | 0x800000007ffc0000ULL, | ||
5944 | 0ULL, | ||
5945 | 0ULL, | ||
5946 | 0ULL, | ||
5947 | 0ULL | ||
5948 | }, | ||
5949 | { | ||
5950 | /* fixed_bit_values */ | ||
5951 | 0x00000000089c0000ULL, | ||
5952 | -1ULL, | ||
5953 | -1ULL, | ||
5954 | -1ULL, | ||
5955 | -1ULL | ||
5956 | } | ||
5957 | }, | ||
5958 | { "mullla_uu", TILE_OPC_MULLLA_UU, 0x5 /* pipes */, 3 /* num_operands */, | ||
5959 | TREG_ZERO, /* implicitly_written_register */ | ||
5960 | 1, /* can_bundle */ | ||
5961 | { | ||
5962 | /* operands */ | ||
5963 | { 21, 8, 16 }, | ||
5964 | { 0, }, | ||
5965 | { 31, 12, 18 }, | ||
5966 | { 0, }, | ||
5967 | { 0, } | ||
5968 | }, | ||
5969 | { | ||
5970 | /* fixed_bit_masks */ | ||
5971 | 0x800000007ffc0000ULL, | ||
5972 | 0ULL, | ||
5973 | 0x80000000780c0000ULL, | ||
5974 | 0ULL, | ||
5975 | 0ULL | ||
5976 | }, | ||
5977 | { | ||
5978 | /* fixed_bit_values */ | ||
5979 | 0x0000000000a00000ULL, | ||
5980 | -1ULL, | ||
5981 | 0x80000000400c0000ULL, | ||
5982 | -1ULL, | ||
5983 | -1ULL | ||
5984 | } | ||
5985 | }, | ||
5986 | { "mullla_uu.sn", TILE_OPC_MULLLA_UU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
5987 | TREG_SN, /* implicitly_written_register */ | ||
5988 | 1, /* can_bundle */ | ||
5989 | { | ||
5990 | /* operands */ | ||
5991 | { 21, 8, 16 }, | ||
5992 | { 0, }, | ||
5993 | { 0, }, | ||
5994 | { 0, }, | ||
5995 | { 0, } | ||
5996 | }, | ||
5997 | { | ||
5998 | /* fixed_bit_masks */ | ||
5999 | 0x800000007ffc0000ULL, | ||
6000 | 0ULL, | ||
6001 | 0ULL, | ||
6002 | 0ULL, | ||
6003 | 0ULL | ||
6004 | }, | ||
6005 | { | ||
6006 | /* fixed_bit_values */ | ||
6007 | 0x0000000008a00000ULL, | ||
6008 | -1ULL, | ||
6009 | -1ULL, | ||
6010 | -1ULL, | ||
6011 | -1ULL | ||
6012 | } | ||
6013 | }, | ||
6014 | { "mulllsa_uu", TILE_OPC_MULLLSA_UU, 0x1 /* pipes */, 3 /* num_operands */, | ||
6015 | TREG_ZERO, /* implicitly_written_register */ | ||
6016 | 1, /* can_bundle */ | ||
6017 | { | ||
6018 | /* operands */ | ||
6019 | { 21, 8, 16 }, | ||
6020 | { 0, }, | ||
6021 | { 0, }, | ||
6022 | { 0, }, | ||
6023 | { 0, } | ||
6024 | }, | ||
6025 | { | ||
6026 | /* fixed_bit_masks */ | ||
6027 | 0x800000007ffc0000ULL, | ||
6028 | 0ULL, | ||
6029 | 0ULL, | ||
6030 | 0ULL, | ||
6031 | 0ULL | ||
6032 | }, | ||
6033 | { | ||
6034 | /* fixed_bit_values */ | ||
6035 | 0x0000000000a40000ULL, | ||
6036 | -1ULL, | ||
6037 | -1ULL, | ||
6038 | -1ULL, | ||
6039 | -1ULL | ||
6040 | } | ||
6041 | }, | ||
6042 | { "mulllsa_uu.sn", TILE_OPC_MULLLSA_UU_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
6043 | TREG_SN, /* implicitly_written_register */ | ||
6044 | 1, /* can_bundle */ | ||
6045 | { | ||
6046 | /* operands */ | ||
6047 | { 21, 8, 16 }, | ||
6048 | { 0, }, | ||
6049 | { 0, }, | ||
6050 | { 0, }, | ||
6051 | { 0, } | ||
6052 | }, | ||
6053 | { | ||
6054 | /* fixed_bit_masks */ | ||
6055 | 0x800000007ffc0000ULL, | ||
6056 | 0ULL, | ||
6057 | 0ULL, | ||
6058 | 0ULL, | ||
6059 | 0ULL | ||
6060 | }, | ||
6061 | { | ||
6062 | /* fixed_bit_values */ | ||
6063 | 0x0000000008a40000ULL, | ||
6064 | -1ULL, | ||
6065 | -1ULL, | ||
6066 | -1ULL, | ||
6067 | -1ULL | ||
6068 | } | ||
6069 | }, | ||
6070 | { "mvnz", TILE_OPC_MVNZ, 0x5 /* pipes */, 3 /* num_operands */, | ||
6071 | TREG_ZERO, /* implicitly_written_register */ | ||
6072 | 1, /* can_bundle */ | ||
6073 | { | ||
6074 | /* operands */ | ||
6075 | { 21, 8, 16 }, | ||
6076 | { 0, }, | ||
6077 | { 31, 12, 18 }, | ||
6078 | { 0, }, | ||
6079 | { 0, } | ||
6080 | }, | ||
6081 | { | ||
6082 | /* fixed_bit_masks */ | ||
6083 | 0x800000007ffc0000ULL, | ||
6084 | 0ULL, | ||
6085 | 0x80000000780c0000ULL, | ||
6086 | 0ULL, | ||
6087 | 0ULL | ||
6088 | }, | ||
6089 | { | ||
6090 | /* fixed_bit_values */ | ||
6091 | 0x0000000000b40000ULL, | ||
6092 | -1ULL, | ||
6093 | 0x8000000010040000ULL, | ||
6094 | -1ULL, | ||
6095 | -1ULL | ||
6096 | } | ||
6097 | }, | ||
6098 | { "mvnz.sn", TILE_OPC_MVNZ_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
6099 | TREG_SN, /* implicitly_written_register */ | ||
6100 | 1, /* can_bundle */ | ||
6101 | { | ||
6102 | /* operands */ | ||
6103 | { 21, 8, 16 }, | ||
6104 | { 0, }, | ||
6105 | { 0, }, | ||
6106 | { 0, }, | ||
6107 | { 0, } | ||
6108 | }, | ||
6109 | { | ||
6110 | /* fixed_bit_masks */ | ||
6111 | 0x800000007ffc0000ULL, | ||
6112 | 0ULL, | ||
6113 | 0ULL, | ||
6114 | 0ULL, | ||
6115 | 0ULL | ||
6116 | }, | ||
6117 | { | ||
6118 | /* fixed_bit_values */ | ||
6119 | 0x0000000008b40000ULL, | ||
6120 | -1ULL, | ||
6121 | -1ULL, | ||
6122 | -1ULL, | ||
6123 | -1ULL | ||
6124 | } | ||
6125 | }, | ||
6126 | { "mvz", TILE_OPC_MVZ, 0x5 /* pipes */, 3 /* num_operands */, | ||
6127 | TREG_ZERO, /* implicitly_written_register */ | ||
6128 | 1, /* can_bundle */ | ||
6129 | { | ||
6130 | /* operands */ | ||
6131 | { 21, 8, 16 }, | ||
6132 | { 0, }, | ||
6133 | { 31, 12, 18 }, | ||
6134 | { 0, }, | ||
6135 | { 0, } | ||
6136 | }, | ||
6137 | { | ||
6138 | /* fixed_bit_masks */ | ||
6139 | 0x800000007ffc0000ULL, | ||
6140 | 0ULL, | ||
6141 | 0x80000000780c0000ULL, | ||
6142 | 0ULL, | ||
6143 | 0ULL | ||
6144 | }, | ||
6145 | { | ||
6146 | /* fixed_bit_values */ | ||
6147 | 0x0000000000b80000ULL, | ||
6148 | -1ULL, | ||
6149 | 0x8000000010080000ULL, | ||
6150 | -1ULL, | ||
6151 | -1ULL | ||
6152 | } | ||
6153 | }, | ||
6154 | { "mvz.sn", TILE_OPC_MVZ_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
6155 | TREG_SN, /* implicitly_written_register */ | ||
6156 | 1, /* can_bundle */ | ||
6157 | { | ||
6158 | /* operands */ | ||
6159 | { 21, 8, 16 }, | ||
6160 | { 0, }, | ||
6161 | { 0, }, | ||
6162 | { 0, }, | ||
6163 | { 0, } | ||
6164 | }, | ||
6165 | { | ||
6166 | /* fixed_bit_masks */ | ||
6167 | 0x800000007ffc0000ULL, | ||
6168 | 0ULL, | ||
6169 | 0ULL, | ||
6170 | 0ULL, | ||
6171 | 0ULL | ||
6172 | }, | ||
6173 | { | ||
6174 | /* fixed_bit_values */ | ||
6175 | 0x0000000008b80000ULL, | ||
6176 | -1ULL, | ||
6177 | -1ULL, | ||
6178 | -1ULL, | ||
6179 | -1ULL | ||
6180 | } | ||
6181 | }, | ||
6182 | { "mz", TILE_OPC_MZ, 0xf /* pipes */, 3 /* num_operands */, | ||
6183 | TREG_ZERO, /* implicitly_written_register */ | ||
6184 | 1, /* can_bundle */ | ||
6185 | { | ||
6186 | /* operands */ | ||
6187 | { 7, 8, 16 }, | ||
6188 | { 9, 10, 17 }, | ||
6189 | { 11, 12, 18 }, | ||
6190 | { 13, 14, 19 }, | ||
6191 | { 0, } | ||
6192 | }, | ||
6193 | { | ||
6194 | /* fixed_bit_masks */ | ||
6195 | 0x800000007ffc0000ULL, | ||
6196 | 0xfffe000000000000ULL, | ||
6197 | 0x80000000780c0000ULL, | ||
6198 | 0xf806000000000000ULL, | ||
6199 | 0ULL | ||
6200 | }, | ||
6201 | { | ||
6202 | /* fixed_bit_values */ | ||
6203 | 0x0000000000c40000ULL, | ||
6204 | 0x082e000000000000ULL, | ||
6205 | 0x80000000100c0000ULL, | ||
6206 | 0x9004000000000000ULL, | ||
6207 | -1ULL | ||
6208 | } | ||
6209 | }, | ||
6210 | { "mz.sn", TILE_OPC_MZ_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6211 | TREG_SN, /* implicitly_written_register */ | ||
6212 | 1, /* can_bundle */ | ||
6213 | { | ||
6214 | /* operands */ | ||
6215 | { 7, 8, 16 }, | ||
6216 | { 9, 10, 17 }, | ||
6217 | { 0, }, | ||
6218 | { 0, }, | ||
6219 | { 0, } | ||
6220 | }, | ||
6221 | { | ||
6222 | /* fixed_bit_masks */ | ||
6223 | 0x800000007ffc0000ULL, | ||
6224 | 0xfffe000000000000ULL, | ||
6225 | 0ULL, | ||
6226 | 0ULL, | ||
6227 | 0ULL | ||
6228 | }, | ||
6229 | { | ||
6230 | /* fixed_bit_values */ | ||
6231 | 0x0000000008c40000ULL, | ||
6232 | 0x0c2e000000000000ULL, | ||
6233 | -1ULL, | ||
6234 | -1ULL, | ||
6235 | -1ULL | ||
6236 | } | ||
6237 | }, | ||
6238 | { "mzb", TILE_OPC_MZB, 0x3 /* pipes */, 3 /* num_operands */, | ||
6239 | TREG_ZERO, /* implicitly_written_register */ | ||
6240 | 1, /* can_bundle */ | ||
6241 | { | ||
6242 | /* operands */ | ||
6243 | { 7, 8, 16 }, | ||
6244 | { 9, 10, 17 }, | ||
6245 | { 0, }, | ||
6246 | { 0, }, | ||
6247 | { 0, } | ||
6248 | }, | ||
6249 | { | ||
6250 | /* fixed_bit_masks */ | ||
6251 | 0x800000007ffc0000ULL, | ||
6252 | 0xfffe000000000000ULL, | ||
6253 | 0ULL, | ||
6254 | 0ULL, | ||
6255 | 0ULL | ||
6256 | }, | ||
6257 | { | ||
6258 | /* fixed_bit_values */ | ||
6259 | 0x0000000000bc0000ULL, | ||
6260 | 0x082a000000000000ULL, | ||
6261 | -1ULL, | ||
6262 | -1ULL, | ||
6263 | -1ULL | ||
6264 | } | ||
6265 | }, | ||
6266 | { "mzb.sn", TILE_OPC_MZB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6267 | TREG_SN, /* implicitly_written_register */ | ||
6268 | 1, /* can_bundle */ | ||
6269 | { | ||
6270 | /* operands */ | ||
6271 | { 7, 8, 16 }, | ||
6272 | { 9, 10, 17 }, | ||
6273 | { 0, }, | ||
6274 | { 0, }, | ||
6275 | { 0, } | ||
6276 | }, | ||
6277 | { | ||
6278 | /* fixed_bit_masks */ | ||
6279 | 0x800000007ffc0000ULL, | ||
6280 | 0xfffe000000000000ULL, | ||
6281 | 0ULL, | ||
6282 | 0ULL, | ||
6283 | 0ULL | ||
6284 | }, | ||
6285 | { | ||
6286 | /* fixed_bit_values */ | ||
6287 | 0x0000000008bc0000ULL, | ||
6288 | 0x0c2a000000000000ULL, | ||
6289 | -1ULL, | ||
6290 | -1ULL, | ||
6291 | -1ULL | ||
6292 | } | ||
6293 | }, | ||
6294 | { "mzh", TILE_OPC_MZH, 0x3 /* pipes */, 3 /* num_operands */, | ||
6295 | TREG_ZERO, /* implicitly_written_register */ | ||
6296 | 1, /* can_bundle */ | ||
6297 | { | ||
6298 | /* operands */ | ||
6299 | { 7, 8, 16 }, | ||
6300 | { 9, 10, 17 }, | ||
6301 | { 0, }, | ||
6302 | { 0, }, | ||
6303 | { 0, } | ||
6304 | }, | ||
6305 | { | ||
6306 | /* fixed_bit_masks */ | ||
6307 | 0x800000007ffc0000ULL, | ||
6308 | 0xfffe000000000000ULL, | ||
6309 | 0ULL, | ||
6310 | 0ULL, | ||
6311 | 0ULL | ||
6312 | }, | ||
6313 | { | ||
6314 | /* fixed_bit_values */ | ||
6315 | 0x0000000000c00000ULL, | ||
6316 | 0x082c000000000000ULL, | ||
6317 | -1ULL, | ||
6318 | -1ULL, | ||
6319 | -1ULL | ||
6320 | } | ||
6321 | }, | ||
6322 | { "mzh.sn", TILE_OPC_MZH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6323 | TREG_SN, /* implicitly_written_register */ | ||
6324 | 1, /* can_bundle */ | ||
6325 | { | ||
6326 | /* operands */ | ||
6327 | { 7, 8, 16 }, | ||
6328 | { 9, 10, 17 }, | ||
6329 | { 0, }, | ||
6330 | { 0, }, | ||
6331 | { 0, } | ||
6332 | }, | ||
6333 | { | ||
6334 | /* fixed_bit_masks */ | ||
6335 | 0x800000007ffc0000ULL, | ||
6336 | 0xfffe000000000000ULL, | ||
6337 | 0ULL, | ||
6338 | 0ULL, | ||
6339 | 0ULL | ||
6340 | }, | ||
6341 | { | ||
6342 | /* fixed_bit_values */ | ||
6343 | 0x0000000008c00000ULL, | ||
6344 | 0x0c2c000000000000ULL, | ||
6345 | -1ULL, | ||
6346 | -1ULL, | ||
6347 | -1ULL | ||
6348 | } | ||
6349 | }, | ||
6350 | { "nap", TILE_OPC_NAP, 0x2 /* pipes */, 0 /* num_operands */, | ||
6351 | TREG_ZERO, /* implicitly_written_register */ | ||
6352 | 0, /* can_bundle */ | ||
6353 | { | ||
6354 | /* operands */ | ||
6355 | { 0, }, | ||
6356 | { }, | ||
6357 | { 0, }, | ||
6358 | { 0, }, | ||
6359 | { 0, } | ||
6360 | }, | ||
6361 | { | ||
6362 | /* fixed_bit_masks */ | ||
6363 | 0ULL, | ||
6364 | 0xfbfff80000000000ULL, | ||
6365 | 0ULL, | ||
6366 | 0ULL, | ||
6367 | 0ULL | ||
6368 | }, | ||
6369 | { | ||
6370 | /* fixed_bit_values */ | ||
6371 | -1ULL, | ||
6372 | 0x400b800000000000ULL, | ||
6373 | -1ULL, | ||
6374 | -1ULL, | ||
6375 | -1ULL | ||
6376 | } | ||
6377 | }, | ||
6378 | { "nop", TILE_OPC_NOP, 0xf /* pipes */, 0 /* num_operands */, | ||
6379 | TREG_ZERO, /* implicitly_written_register */ | ||
6380 | 1, /* can_bundle */ | ||
6381 | { | ||
6382 | /* operands */ | ||
6383 | { }, | ||
6384 | { }, | ||
6385 | { }, | ||
6386 | { }, | ||
6387 | { 0, } | ||
6388 | }, | ||
6389 | { | ||
6390 | /* fixed_bit_masks */ | ||
6391 | 0x8000000077fff000ULL, | ||
6392 | 0xfbfff80000000000ULL, | ||
6393 | 0x80000000780ff000ULL, | ||
6394 | 0xf807f80000000000ULL, | ||
6395 | 0ULL | ||
6396 | }, | ||
6397 | { | ||
6398 | /* fixed_bit_values */ | ||
6399 | 0x0000000070166000ULL, | ||
6400 | 0x400b880000000000ULL, | ||
6401 | 0x80000000680a6000ULL, | ||
6402 | 0xd805180000000000ULL, | ||
6403 | -1ULL | ||
6404 | } | ||
6405 | }, | ||
6406 | { "nor", TILE_OPC_NOR, 0xf /* pipes */, 3 /* num_operands */, | ||
6407 | TREG_ZERO, /* implicitly_written_register */ | ||
6408 | 1, /* can_bundle */ | ||
6409 | { | ||
6410 | /* operands */ | ||
6411 | { 7, 8, 16 }, | ||
6412 | { 9, 10, 17 }, | ||
6413 | { 11, 12, 18 }, | ||
6414 | { 13, 14, 19 }, | ||
6415 | { 0, } | ||
6416 | }, | ||
6417 | { | ||
6418 | /* fixed_bit_masks */ | ||
6419 | 0x800000007ffc0000ULL, | ||
6420 | 0xfffe000000000000ULL, | ||
6421 | 0x80000000780c0000ULL, | ||
6422 | 0xf806000000000000ULL, | ||
6423 | 0ULL | ||
6424 | }, | ||
6425 | { | ||
6426 | /* fixed_bit_values */ | ||
6427 | 0x0000000000c80000ULL, | ||
6428 | 0x0830000000000000ULL, | ||
6429 | 0x8000000018040000ULL, | ||
6430 | 0x9802000000000000ULL, | ||
6431 | -1ULL | ||
6432 | } | ||
6433 | }, | ||
6434 | { "nor.sn", TILE_OPC_NOR_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6435 | TREG_SN, /* implicitly_written_register */ | ||
6436 | 1, /* can_bundle */ | ||
6437 | { | ||
6438 | /* operands */ | ||
6439 | { 7, 8, 16 }, | ||
6440 | { 9, 10, 17 }, | ||
6441 | { 0, }, | ||
6442 | { 0, }, | ||
6443 | { 0, } | ||
6444 | }, | ||
6445 | { | ||
6446 | /* fixed_bit_masks */ | ||
6447 | 0x800000007ffc0000ULL, | ||
6448 | 0xfffe000000000000ULL, | ||
6449 | 0ULL, | ||
6450 | 0ULL, | ||
6451 | 0ULL | ||
6452 | }, | ||
6453 | { | ||
6454 | /* fixed_bit_values */ | ||
6455 | 0x0000000008c80000ULL, | ||
6456 | 0x0c30000000000000ULL, | ||
6457 | -1ULL, | ||
6458 | -1ULL, | ||
6459 | -1ULL | ||
6460 | } | ||
6461 | }, | ||
6462 | { "or", TILE_OPC_OR, 0xf /* pipes */, 3 /* num_operands */, | ||
6463 | TREG_ZERO, /* implicitly_written_register */ | ||
6464 | 1, /* can_bundle */ | ||
6465 | { | ||
6466 | /* operands */ | ||
6467 | { 7, 8, 16 }, | ||
6468 | { 9, 10, 17 }, | ||
6469 | { 11, 12, 18 }, | ||
6470 | { 13, 14, 19 }, | ||
6471 | { 0, } | ||
6472 | }, | ||
6473 | { | ||
6474 | /* fixed_bit_masks */ | ||
6475 | 0x800000007ffc0000ULL, | ||
6476 | 0xfffe000000000000ULL, | ||
6477 | 0x80000000780c0000ULL, | ||
6478 | 0xf806000000000000ULL, | ||
6479 | 0ULL | ||
6480 | }, | ||
6481 | { | ||
6482 | /* fixed_bit_values */ | ||
6483 | 0x0000000000cc0000ULL, | ||
6484 | 0x0832000000000000ULL, | ||
6485 | 0x8000000018080000ULL, | ||
6486 | 0x9804000000000000ULL, | ||
6487 | -1ULL | ||
6488 | } | ||
6489 | }, | ||
6490 | { "or.sn", TILE_OPC_OR_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6491 | TREG_SN, /* implicitly_written_register */ | ||
6492 | 1, /* can_bundle */ | ||
6493 | { | ||
6494 | /* operands */ | ||
6495 | { 7, 8, 16 }, | ||
6496 | { 9, 10, 17 }, | ||
6497 | { 0, }, | ||
6498 | { 0, }, | ||
6499 | { 0, } | ||
6500 | }, | ||
6501 | { | ||
6502 | /* fixed_bit_masks */ | ||
6503 | 0x800000007ffc0000ULL, | ||
6504 | 0xfffe000000000000ULL, | ||
6505 | 0ULL, | ||
6506 | 0ULL, | ||
6507 | 0ULL | ||
6508 | }, | ||
6509 | { | ||
6510 | /* fixed_bit_values */ | ||
6511 | 0x0000000008cc0000ULL, | ||
6512 | 0x0c32000000000000ULL, | ||
6513 | -1ULL, | ||
6514 | -1ULL, | ||
6515 | -1ULL | ||
6516 | } | ||
6517 | }, | ||
6518 | { "ori", TILE_OPC_ORI, 0xf /* pipes */, 3 /* num_operands */, | ||
6519 | TREG_ZERO, /* implicitly_written_register */ | ||
6520 | 1, /* can_bundle */ | ||
6521 | { | ||
6522 | /* operands */ | ||
6523 | { 7, 8, 0 }, | ||
6524 | { 9, 10, 1 }, | ||
6525 | { 11, 12, 2 }, | ||
6526 | { 13, 14, 3 }, | ||
6527 | { 0, } | ||
6528 | }, | ||
6529 | { | ||
6530 | /* fixed_bit_masks */ | ||
6531 | 0x800000007ff00000ULL, | ||
6532 | 0xfff8000000000000ULL, | ||
6533 | 0x8000000078000000ULL, | ||
6534 | 0xf800000000000000ULL, | ||
6535 | 0ULL | ||
6536 | }, | ||
6537 | { | ||
6538 | /* fixed_bit_values */ | ||
6539 | 0x0000000040800000ULL, | ||
6540 | 0x3058000000000000ULL, | ||
6541 | 0x8000000058000000ULL, | ||
6542 | 0xc800000000000000ULL, | ||
6543 | -1ULL | ||
6544 | } | ||
6545 | }, | ||
6546 | { "ori.sn", TILE_OPC_ORI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6547 | TREG_SN, /* implicitly_written_register */ | ||
6548 | 1, /* can_bundle */ | ||
6549 | { | ||
6550 | /* operands */ | ||
6551 | { 7, 8, 0 }, | ||
6552 | { 9, 10, 1 }, | ||
6553 | { 0, }, | ||
6554 | { 0, }, | ||
6555 | { 0, } | ||
6556 | }, | ||
6557 | { | ||
6558 | /* fixed_bit_masks */ | ||
6559 | 0x800000007ff00000ULL, | ||
6560 | 0xfff8000000000000ULL, | ||
6561 | 0ULL, | ||
6562 | 0ULL, | ||
6563 | 0ULL | ||
6564 | }, | ||
6565 | { | ||
6566 | /* fixed_bit_values */ | ||
6567 | 0x0000000048800000ULL, | ||
6568 | 0x3458000000000000ULL, | ||
6569 | -1ULL, | ||
6570 | -1ULL, | ||
6571 | -1ULL | ||
6572 | } | ||
6573 | }, | ||
6574 | { "packbs_u", TILE_OPC_PACKBS_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
6575 | TREG_ZERO, /* implicitly_written_register */ | ||
6576 | 1, /* can_bundle */ | ||
6577 | { | ||
6578 | /* operands */ | ||
6579 | { 7, 8, 16 }, | ||
6580 | { 9, 10, 17 }, | ||
6581 | { 0, }, | ||
6582 | { 0, }, | ||
6583 | { 0, } | ||
6584 | }, | ||
6585 | { | ||
6586 | /* fixed_bit_masks */ | ||
6587 | 0x800000007ffc0000ULL, | ||
6588 | 0xfffe000000000000ULL, | ||
6589 | 0ULL, | ||
6590 | 0ULL, | ||
6591 | 0ULL | ||
6592 | }, | ||
6593 | { | ||
6594 | /* fixed_bit_values */ | ||
6595 | 0x00000000019c0000ULL, | ||
6596 | 0x0892000000000000ULL, | ||
6597 | -1ULL, | ||
6598 | -1ULL, | ||
6599 | -1ULL | ||
6600 | } | ||
6601 | }, | ||
6602 | { "packbs_u.sn", TILE_OPC_PACKBS_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6603 | TREG_SN, /* implicitly_written_register */ | ||
6604 | 1, /* can_bundle */ | ||
6605 | { | ||
6606 | /* operands */ | ||
6607 | { 7, 8, 16 }, | ||
6608 | { 9, 10, 17 }, | ||
6609 | { 0, }, | ||
6610 | { 0, }, | ||
6611 | { 0, } | ||
6612 | }, | ||
6613 | { | ||
6614 | /* fixed_bit_masks */ | ||
6615 | 0x800000007ffc0000ULL, | ||
6616 | 0xfffe000000000000ULL, | ||
6617 | 0ULL, | ||
6618 | 0ULL, | ||
6619 | 0ULL | ||
6620 | }, | ||
6621 | { | ||
6622 | /* fixed_bit_values */ | ||
6623 | 0x00000000099c0000ULL, | ||
6624 | 0x0c92000000000000ULL, | ||
6625 | -1ULL, | ||
6626 | -1ULL, | ||
6627 | -1ULL | ||
6628 | } | ||
6629 | }, | ||
6630 | { "packhb", TILE_OPC_PACKHB, 0x3 /* pipes */, 3 /* num_operands */, | ||
6631 | TREG_ZERO, /* implicitly_written_register */ | ||
6632 | 1, /* can_bundle */ | ||
6633 | { | ||
6634 | /* operands */ | ||
6635 | { 7, 8, 16 }, | ||
6636 | { 9, 10, 17 }, | ||
6637 | { 0, }, | ||
6638 | { 0, }, | ||
6639 | { 0, } | ||
6640 | }, | ||
6641 | { | ||
6642 | /* fixed_bit_masks */ | ||
6643 | 0x800000007ffc0000ULL, | ||
6644 | 0xfffe000000000000ULL, | ||
6645 | 0ULL, | ||
6646 | 0ULL, | ||
6647 | 0ULL | ||
6648 | }, | ||
6649 | { | ||
6650 | /* fixed_bit_values */ | ||
6651 | 0x0000000000d00000ULL, | ||
6652 | 0x0834000000000000ULL, | ||
6653 | -1ULL, | ||
6654 | -1ULL, | ||
6655 | -1ULL | ||
6656 | } | ||
6657 | }, | ||
6658 | { "packhb.sn", TILE_OPC_PACKHB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6659 | TREG_SN, /* implicitly_written_register */ | ||
6660 | 1, /* can_bundle */ | ||
6661 | { | ||
6662 | /* operands */ | ||
6663 | { 7, 8, 16 }, | ||
6664 | { 9, 10, 17 }, | ||
6665 | { 0, }, | ||
6666 | { 0, }, | ||
6667 | { 0, } | ||
6668 | }, | ||
6669 | { | ||
6670 | /* fixed_bit_masks */ | ||
6671 | 0x800000007ffc0000ULL, | ||
6672 | 0xfffe000000000000ULL, | ||
6673 | 0ULL, | ||
6674 | 0ULL, | ||
6675 | 0ULL | ||
6676 | }, | ||
6677 | { | ||
6678 | /* fixed_bit_values */ | ||
6679 | 0x0000000008d00000ULL, | ||
6680 | 0x0c34000000000000ULL, | ||
6681 | -1ULL, | ||
6682 | -1ULL, | ||
6683 | -1ULL | ||
6684 | } | ||
6685 | }, | ||
6686 | { "packhs", TILE_OPC_PACKHS, 0x3 /* pipes */, 3 /* num_operands */, | ||
6687 | TREG_ZERO, /* implicitly_written_register */ | ||
6688 | 1, /* can_bundle */ | ||
6689 | { | ||
6690 | /* operands */ | ||
6691 | { 7, 8, 16 }, | ||
6692 | { 9, 10, 17 }, | ||
6693 | { 0, }, | ||
6694 | { 0, }, | ||
6695 | { 0, } | ||
6696 | }, | ||
6697 | { | ||
6698 | /* fixed_bit_masks */ | ||
6699 | 0x800000007ffc0000ULL, | ||
6700 | 0xfffe000000000000ULL, | ||
6701 | 0ULL, | ||
6702 | 0ULL, | ||
6703 | 0ULL | ||
6704 | }, | ||
6705 | { | ||
6706 | /* fixed_bit_values */ | ||
6707 | 0x0000000001980000ULL, | ||
6708 | 0x0890000000000000ULL, | ||
6709 | -1ULL, | ||
6710 | -1ULL, | ||
6711 | -1ULL | ||
6712 | } | ||
6713 | }, | ||
6714 | { "packhs.sn", TILE_OPC_PACKHS_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6715 | TREG_SN, /* implicitly_written_register */ | ||
6716 | 1, /* can_bundle */ | ||
6717 | { | ||
6718 | /* operands */ | ||
6719 | { 7, 8, 16 }, | ||
6720 | { 9, 10, 17 }, | ||
6721 | { 0, }, | ||
6722 | { 0, }, | ||
6723 | { 0, } | ||
6724 | }, | ||
6725 | { | ||
6726 | /* fixed_bit_masks */ | ||
6727 | 0x800000007ffc0000ULL, | ||
6728 | 0xfffe000000000000ULL, | ||
6729 | 0ULL, | ||
6730 | 0ULL, | ||
6731 | 0ULL | ||
6732 | }, | ||
6733 | { | ||
6734 | /* fixed_bit_values */ | ||
6735 | 0x0000000009980000ULL, | ||
6736 | 0x0c90000000000000ULL, | ||
6737 | -1ULL, | ||
6738 | -1ULL, | ||
6739 | -1ULL | ||
6740 | } | ||
6741 | }, | ||
6742 | { "packlb", TILE_OPC_PACKLB, 0x3 /* pipes */, 3 /* num_operands */, | ||
6743 | TREG_ZERO, /* implicitly_written_register */ | ||
6744 | 1, /* can_bundle */ | ||
6745 | { | ||
6746 | /* operands */ | ||
6747 | { 7, 8, 16 }, | ||
6748 | { 9, 10, 17 }, | ||
6749 | { 0, }, | ||
6750 | { 0, }, | ||
6751 | { 0, } | ||
6752 | }, | ||
6753 | { | ||
6754 | /* fixed_bit_masks */ | ||
6755 | 0x800000007ffc0000ULL, | ||
6756 | 0xfffe000000000000ULL, | ||
6757 | 0ULL, | ||
6758 | 0ULL, | ||
6759 | 0ULL | ||
6760 | }, | ||
6761 | { | ||
6762 | /* fixed_bit_values */ | ||
6763 | 0x0000000000d40000ULL, | ||
6764 | 0x0836000000000000ULL, | ||
6765 | -1ULL, | ||
6766 | -1ULL, | ||
6767 | -1ULL | ||
6768 | } | ||
6769 | }, | ||
6770 | { "packlb.sn", TILE_OPC_PACKLB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6771 | TREG_SN, /* implicitly_written_register */ | ||
6772 | 1, /* can_bundle */ | ||
6773 | { | ||
6774 | /* operands */ | ||
6775 | { 7, 8, 16 }, | ||
6776 | { 9, 10, 17 }, | ||
6777 | { 0, }, | ||
6778 | { 0, }, | ||
6779 | { 0, } | ||
6780 | }, | ||
6781 | { | ||
6782 | /* fixed_bit_masks */ | ||
6783 | 0x800000007ffc0000ULL, | ||
6784 | 0xfffe000000000000ULL, | ||
6785 | 0ULL, | ||
6786 | 0ULL, | ||
6787 | 0ULL | ||
6788 | }, | ||
6789 | { | ||
6790 | /* fixed_bit_values */ | ||
6791 | 0x0000000008d40000ULL, | ||
6792 | 0x0c36000000000000ULL, | ||
6793 | -1ULL, | ||
6794 | -1ULL, | ||
6795 | -1ULL | ||
6796 | } | ||
6797 | }, | ||
6798 | { "pcnt", TILE_OPC_PCNT, 0x5 /* pipes */, 2 /* num_operands */, | ||
6799 | TREG_ZERO, /* implicitly_written_register */ | ||
6800 | 1, /* can_bundle */ | ||
6801 | { | ||
6802 | /* operands */ | ||
6803 | { 7, 8 }, | ||
6804 | { 0, }, | ||
6805 | { 11, 12 }, | ||
6806 | { 0, }, | ||
6807 | { 0, } | ||
6808 | }, | ||
6809 | { | ||
6810 | /* fixed_bit_masks */ | ||
6811 | 0x800000007ffff000ULL, | ||
6812 | 0ULL, | ||
6813 | 0x80000000780ff000ULL, | ||
6814 | 0ULL, | ||
6815 | 0ULL | ||
6816 | }, | ||
6817 | { | ||
6818 | /* fixed_bit_values */ | ||
6819 | 0x0000000070167000ULL, | ||
6820 | -1ULL, | ||
6821 | 0x80000000680a7000ULL, | ||
6822 | -1ULL, | ||
6823 | -1ULL | ||
6824 | } | ||
6825 | }, | ||
6826 | { "pcnt.sn", TILE_OPC_PCNT_SN, 0x1 /* pipes */, 2 /* num_operands */, | ||
6827 | TREG_SN, /* implicitly_written_register */ | ||
6828 | 1, /* can_bundle */ | ||
6829 | { | ||
6830 | /* operands */ | ||
6831 | { 7, 8 }, | ||
6832 | { 0, }, | ||
6833 | { 0, }, | ||
6834 | { 0, }, | ||
6835 | { 0, } | ||
6836 | }, | ||
6837 | { | ||
6838 | /* fixed_bit_masks */ | ||
6839 | 0x800000007ffff000ULL, | ||
6840 | 0ULL, | ||
6841 | 0ULL, | ||
6842 | 0ULL, | ||
6843 | 0ULL | ||
6844 | }, | ||
6845 | { | ||
6846 | /* fixed_bit_values */ | ||
6847 | 0x0000000078167000ULL, | ||
6848 | -1ULL, | ||
6849 | -1ULL, | ||
6850 | -1ULL, | ||
6851 | -1ULL | ||
6852 | } | ||
6853 | }, | ||
6854 | { "rl", TILE_OPC_RL, 0xf /* pipes */, 3 /* num_operands */, | ||
6855 | TREG_ZERO, /* implicitly_written_register */ | ||
6856 | 1, /* can_bundle */ | ||
6857 | { | ||
6858 | /* operands */ | ||
6859 | { 7, 8, 16 }, | ||
6860 | { 9, 10, 17 }, | ||
6861 | { 11, 12, 18 }, | ||
6862 | { 13, 14, 19 }, | ||
6863 | { 0, } | ||
6864 | }, | ||
6865 | { | ||
6866 | /* fixed_bit_masks */ | ||
6867 | 0x800000007ffc0000ULL, | ||
6868 | 0xfffe000000000000ULL, | ||
6869 | 0x80000000780c0000ULL, | ||
6870 | 0xf806000000000000ULL, | ||
6871 | 0ULL | ||
6872 | }, | ||
6873 | { | ||
6874 | /* fixed_bit_values */ | ||
6875 | 0x0000000000d80000ULL, | ||
6876 | 0x0838000000000000ULL, | ||
6877 | 0x8000000020000000ULL, | ||
6878 | 0xa000000000000000ULL, | ||
6879 | -1ULL | ||
6880 | } | ||
6881 | }, | ||
6882 | { "rl.sn", TILE_OPC_RL_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6883 | TREG_SN, /* implicitly_written_register */ | ||
6884 | 1, /* can_bundle */ | ||
6885 | { | ||
6886 | /* operands */ | ||
6887 | { 7, 8, 16 }, | ||
6888 | { 9, 10, 17 }, | ||
6889 | { 0, }, | ||
6890 | { 0, }, | ||
6891 | { 0, } | ||
6892 | }, | ||
6893 | { | ||
6894 | /* fixed_bit_masks */ | ||
6895 | 0x800000007ffc0000ULL, | ||
6896 | 0xfffe000000000000ULL, | ||
6897 | 0ULL, | ||
6898 | 0ULL, | ||
6899 | 0ULL | ||
6900 | }, | ||
6901 | { | ||
6902 | /* fixed_bit_values */ | ||
6903 | 0x0000000008d80000ULL, | ||
6904 | 0x0c38000000000000ULL, | ||
6905 | -1ULL, | ||
6906 | -1ULL, | ||
6907 | -1ULL | ||
6908 | } | ||
6909 | }, | ||
6910 | { "rli", TILE_OPC_RLI, 0xf /* pipes */, 3 /* num_operands */, | ||
6911 | TREG_ZERO, /* implicitly_written_register */ | ||
6912 | 1, /* can_bundle */ | ||
6913 | { | ||
6914 | /* operands */ | ||
6915 | { 7, 8, 32 }, | ||
6916 | { 9, 10, 33 }, | ||
6917 | { 11, 12, 34 }, | ||
6918 | { 13, 14, 35 }, | ||
6919 | { 0, } | ||
6920 | }, | ||
6921 | { | ||
6922 | /* fixed_bit_masks */ | ||
6923 | 0x800000007ffe0000ULL, | ||
6924 | 0xffff000000000000ULL, | ||
6925 | 0x80000000780e0000ULL, | ||
6926 | 0xf807000000000000ULL, | ||
6927 | 0ULL | ||
6928 | }, | ||
6929 | { | ||
6930 | /* fixed_bit_values */ | ||
6931 | 0x0000000070020000ULL, | ||
6932 | 0x4001000000000000ULL, | ||
6933 | 0x8000000068020000ULL, | ||
6934 | 0xd801000000000000ULL, | ||
6935 | -1ULL | ||
6936 | } | ||
6937 | }, | ||
6938 | { "rli.sn", TILE_OPC_RLI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6939 | TREG_SN, /* implicitly_written_register */ | ||
6940 | 1, /* can_bundle */ | ||
6941 | { | ||
6942 | /* operands */ | ||
6943 | { 7, 8, 32 }, | ||
6944 | { 9, 10, 33 }, | ||
6945 | { 0, }, | ||
6946 | { 0, }, | ||
6947 | { 0, } | ||
6948 | }, | ||
6949 | { | ||
6950 | /* fixed_bit_masks */ | ||
6951 | 0x800000007ffe0000ULL, | ||
6952 | 0xffff000000000000ULL, | ||
6953 | 0ULL, | ||
6954 | 0ULL, | ||
6955 | 0ULL | ||
6956 | }, | ||
6957 | { | ||
6958 | /* fixed_bit_values */ | ||
6959 | 0x0000000078020000ULL, | ||
6960 | 0x4401000000000000ULL, | ||
6961 | -1ULL, | ||
6962 | -1ULL, | ||
6963 | -1ULL | ||
6964 | } | ||
6965 | }, | ||
6966 | { "s1a", TILE_OPC_S1A, 0xf /* pipes */, 3 /* num_operands */, | ||
6967 | TREG_ZERO, /* implicitly_written_register */ | ||
6968 | 1, /* can_bundle */ | ||
6969 | { | ||
6970 | /* operands */ | ||
6971 | { 7, 8, 16 }, | ||
6972 | { 9, 10, 17 }, | ||
6973 | { 11, 12, 18 }, | ||
6974 | { 13, 14, 19 }, | ||
6975 | { 0, } | ||
6976 | }, | ||
6977 | { | ||
6978 | /* fixed_bit_masks */ | ||
6979 | 0x800000007ffc0000ULL, | ||
6980 | 0xfffe000000000000ULL, | ||
6981 | 0x80000000780c0000ULL, | ||
6982 | 0xf806000000000000ULL, | ||
6983 | 0ULL | ||
6984 | }, | ||
6985 | { | ||
6986 | /* fixed_bit_values */ | ||
6987 | 0x0000000000dc0000ULL, | ||
6988 | 0x083a000000000000ULL, | ||
6989 | 0x8000000008040000ULL, | ||
6990 | 0x8802000000000000ULL, | ||
6991 | -1ULL | ||
6992 | } | ||
6993 | }, | ||
6994 | { "s1a.sn", TILE_OPC_S1A_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
6995 | TREG_SN, /* implicitly_written_register */ | ||
6996 | 1, /* can_bundle */ | ||
6997 | { | ||
6998 | /* operands */ | ||
6999 | { 7, 8, 16 }, | ||
7000 | { 9, 10, 17 }, | ||
7001 | { 0, }, | ||
7002 | { 0, }, | ||
7003 | { 0, } | ||
7004 | }, | ||
7005 | { | ||
7006 | /* fixed_bit_masks */ | ||
7007 | 0x800000007ffc0000ULL, | ||
7008 | 0xfffe000000000000ULL, | ||
7009 | 0ULL, | ||
7010 | 0ULL, | ||
7011 | 0ULL | ||
7012 | }, | ||
7013 | { | ||
7014 | /* fixed_bit_values */ | ||
7015 | 0x0000000008dc0000ULL, | ||
7016 | 0x0c3a000000000000ULL, | ||
7017 | -1ULL, | ||
7018 | -1ULL, | ||
7019 | -1ULL | ||
7020 | } | ||
7021 | }, | ||
7022 | { "s2a", TILE_OPC_S2A, 0xf /* pipes */, 3 /* num_operands */, | ||
7023 | TREG_ZERO, /* implicitly_written_register */ | ||
7024 | 1, /* can_bundle */ | ||
7025 | { | ||
7026 | /* operands */ | ||
7027 | { 7, 8, 16 }, | ||
7028 | { 9, 10, 17 }, | ||
7029 | { 11, 12, 18 }, | ||
7030 | { 13, 14, 19 }, | ||
7031 | { 0, } | ||
7032 | }, | ||
7033 | { | ||
7034 | /* fixed_bit_masks */ | ||
7035 | 0x800000007ffc0000ULL, | ||
7036 | 0xfffe000000000000ULL, | ||
7037 | 0x80000000780c0000ULL, | ||
7038 | 0xf806000000000000ULL, | ||
7039 | 0ULL | ||
7040 | }, | ||
7041 | { | ||
7042 | /* fixed_bit_values */ | ||
7043 | 0x0000000000e00000ULL, | ||
7044 | 0x083c000000000000ULL, | ||
7045 | 0x8000000008080000ULL, | ||
7046 | 0x8804000000000000ULL, | ||
7047 | -1ULL | ||
7048 | } | ||
7049 | }, | ||
7050 | { "s2a.sn", TILE_OPC_S2A_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
7051 | TREG_SN, /* implicitly_written_register */ | ||
7052 | 1, /* can_bundle */ | ||
7053 | { | ||
7054 | /* operands */ | ||
7055 | { 7, 8, 16 }, | ||
7056 | { 9, 10, 17 }, | ||
7057 | { 0, }, | ||
7058 | { 0, }, | ||
7059 | { 0, } | ||
7060 | }, | ||
7061 | { | ||
7062 | /* fixed_bit_masks */ | ||
7063 | 0x800000007ffc0000ULL, | ||
7064 | 0xfffe000000000000ULL, | ||
7065 | 0ULL, | ||
7066 | 0ULL, | ||
7067 | 0ULL | ||
7068 | }, | ||
7069 | { | ||
7070 | /* fixed_bit_values */ | ||
7071 | 0x0000000008e00000ULL, | ||
7072 | 0x0c3c000000000000ULL, | ||
7073 | -1ULL, | ||
7074 | -1ULL, | ||
7075 | -1ULL | ||
7076 | } | ||
7077 | }, | ||
7078 | { "s3a", TILE_OPC_S3A, 0xf /* pipes */, 3 /* num_operands */, | ||
7079 | TREG_ZERO, /* implicitly_written_register */ | ||
7080 | 1, /* can_bundle */ | ||
7081 | { | ||
7082 | /* operands */ | ||
7083 | { 7, 8, 16 }, | ||
7084 | { 9, 10, 17 }, | ||
7085 | { 11, 12, 18 }, | ||
7086 | { 13, 14, 19 }, | ||
7087 | { 0, } | ||
7088 | }, | ||
7089 | { | ||
7090 | /* fixed_bit_masks */ | ||
7091 | 0x800000007ffc0000ULL, | ||
7092 | 0xfffe000000000000ULL, | ||
7093 | 0x80000000780c0000ULL, | ||
7094 | 0xf806000000000000ULL, | ||
7095 | 0ULL | ||
7096 | }, | ||
7097 | { | ||
7098 | /* fixed_bit_values */ | ||
7099 | 0x0000000000e40000ULL, | ||
7100 | 0x083e000000000000ULL, | ||
7101 | 0x8000000030040000ULL, | ||
7102 | 0xb002000000000000ULL, | ||
7103 | -1ULL | ||
7104 | } | ||
7105 | }, | ||
7106 | { "s3a.sn", TILE_OPC_S3A_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
7107 | TREG_SN, /* implicitly_written_register */ | ||
7108 | 1, /* can_bundle */ | ||
7109 | { | ||
7110 | /* operands */ | ||
7111 | { 7, 8, 16 }, | ||
7112 | { 9, 10, 17 }, | ||
7113 | { 0, }, | ||
7114 | { 0, }, | ||
7115 | { 0, } | ||
7116 | }, | ||
7117 | { | ||
7118 | /* fixed_bit_masks */ | ||
7119 | 0x800000007ffc0000ULL, | ||
7120 | 0xfffe000000000000ULL, | ||
7121 | 0ULL, | ||
7122 | 0ULL, | ||
7123 | 0ULL | ||
7124 | }, | ||
7125 | { | ||
7126 | /* fixed_bit_values */ | ||
7127 | 0x0000000008e40000ULL, | ||
7128 | 0x0c3e000000000000ULL, | ||
7129 | -1ULL, | ||
7130 | -1ULL, | ||
7131 | -1ULL | ||
7132 | } | ||
7133 | }, | ||
7134 | { "sadab_u", TILE_OPC_SADAB_U, 0x1 /* pipes */, 3 /* num_operands */, | ||
7135 | TREG_ZERO, /* implicitly_written_register */ | ||
7136 | 1, /* can_bundle */ | ||
7137 | { | ||
7138 | /* operands */ | ||
7139 | { 21, 8, 16 }, | ||
7140 | { 0, }, | ||
7141 | { 0, }, | ||
7142 | { 0, }, | ||
7143 | { 0, } | ||
7144 | }, | ||
7145 | { | ||
7146 | /* fixed_bit_masks */ | ||
7147 | 0x800000007ffc0000ULL, | ||
7148 | 0ULL, | ||
7149 | 0ULL, | ||
7150 | 0ULL, | ||
7151 | 0ULL | ||
7152 | }, | ||
7153 | { | ||
7154 | /* fixed_bit_values */ | ||
7155 | 0x0000000000e80000ULL, | ||
7156 | -1ULL, | ||
7157 | -1ULL, | ||
7158 | -1ULL, | ||
7159 | -1ULL | ||
7160 | } | ||
7161 | }, | ||
7162 | { "sadab_u.sn", TILE_OPC_SADAB_U_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
7163 | TREG_SN, /* implicitly_written_register */ | ||
7164 | 1, /* can_bundle */ | ||
7165 | { | ||
7166 | /* operands */ | ||
7167 | { 21, 8, 16 }, | ||
7168 | { 0, }, | ||
7169 | { 0, }, | ||
7170 | { 0, }, | ||
7171 | { 0, } | ||
7172 | }, | ||
7173 | { | ||
7174 | /* fixed_bit_masks */ | ||
7175 | 0x800000007ffc0000ULL, | ||
7176 | 0ULL, | ||
7177 | 0ULL, | ||
7178 | 0ULL, | ||
7179 | 0ULL | ||
7180 | }, | ||
7181 | { | ||
7182 | /* fixed_bit_values */ | ||
7183 | 0x0000000008e80000ULL, | ||
7184 | -1ULL, | ||
7185 | -1ULL, | ||
7186 | -1ULL, | ||
7187 | -1ULL | ||
7188 | } | ||
7189 | }, | ||
7190 | { "sadah", TILE_OPC_SADAH, 0x1 /* pipes */, 3 /* num_operands */, | ||
7191 | TREG_ZERO, /* implicitly_written_register */ | ||
7192 | 1, /* can_bundle */ | ||
7193 | { | ||
7194 | /* operands */ | ||
7195 | { 21, 8, 16 }, | ||
7196 | { 0, }, | ||
7197 | { 0, }, | ||
7198 | { 0, }, | ||
7199 | { 0, } | ||
7200 | }, | ||
7201 | { | ||
7202 | /* fixed_bit_masks */ | ||
7203 | 0x800000007ffc0000ULL, | ||
7204 | 0ULL, | ||
7205 | 0ULL, | ||
7206 | 0ULL, | ||
7207 | 0ULL | ||
7208 | }, | ||
7209 | { | ||
7210 | /* fixed_bit_values */ | ||
7211 | 0x0000000000ec0000ULL, | ||
7212 | -1ULL, | ||
7213 | -1ULL, | ||
7214 | -1ULL, | ||
7215 | -1ULL | ||
7216 | } | ||
7217 | }, | ||
7218 | { "sadah.sn", TILE_OPC_SADAH_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
7219 | TREG_SN, /* implicitly_written_register */ | ||
7220 | 1, /* can_bundle */ | ||
7221 | { | ||
7222 | /* operands */ | ||
7223 | { 21, 8, 16 }, | ||
7224 | { 0, }, | ||
7225 | { 0, }, | ||
7226 | { 0, }, | ||
7227 | { 0, } | ||
7228 | }, | ||
7229 | { | ||
7230 | /* fixed_bit_masks */ | ||
7231 | 0x800000007ffc0000ULL, | ||
7232 | 0ULL, | ||
7233 | 0ULL, | ||
7234 | 0ULL, | ||
7235 | 0ULL | ||
7236 | }, | ||
7237 | { | ||
7238 | /* fixed_bit_values */ | ||
7239 | 0x0000000008ec0000ULL, | ||
7240 | -1ULL, | ||
7241 | -1ULL, | ||
7242 | -1ULL, | ||
7243 | -1ULL | ||
7244 | } | ||
7245 | }, | ||
7246 | { "sadah_u", TILE_OPC_SADAH_U, 0x1 /* pipes */, 3 /* num_operands */, | ||
7247 | TREG_ZERO, /* implicitly_written_register */ | ||
7248 | 1, /* can_bundle */ | ||
7249 | { | ||
7250 | /* operands */ | ||
7251 | { 21, 8, 16 }, | ||
7252 | { 0, }, | ||
7253 | { 0, }, | ||
7254 | { 0, }, | ||
7255 | { 0, } | ||
7256 | }, | ||
7257 | { | ||
7258 | /* fixed_bit_masks */ | ||
7259 | 0x800000007ffc0000ULL, | ||
7260 | 0ULL, | ||
7261 | 0ULL, | ||
7262 | 0ULL, | ||
7263 | 0ULL | ||
7264 | }, | ||
7265 | { | ||
7266 | /* fixed_bit_values */ | ||
7267 | 0x0000000000f00000ULL, | ||
7268 | -1ULL, | ||
7269 | -1ULL, | ||
7270 | -1ULL, | ||
7271 | -1ULL | ||
7272 | } | ||
7273 | }, | ||
7274 | { "sadah_u.sn", TILE_OPC_SADAH_U_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
7275 | TREG_SN, /* implicitly_written_register */ | ||
7276 | 1, /* can_bundle */ | ||
7277 | { | ||
7278 | /* operands */ | ||
7279 | { 21, 8, 16 }, | ||
7280 | { 0, }, | ||
7281 | { 0, }, | ||
7282 | { 0, }, | ||
7283 | { 0, } | ||
7284 | }, | ||
7285 | { | ||
7286 | /* fixed_bit_masks */ | ||
7287 | 0x800000007ffc0000ULL, | ||
7288 | 0ULL, | ||
7289 | 0ULL, | ||
7290 | 0ULL, | ||
7291 | 0ULL | ||
7292 | }, | ||
7293 | { | ||
7294 | /* fixed_bit_values */ | ||
7295 | 0x0000000008f00000ULL, | ||
7296 | -1ULL, | ||
7297 | -1ULL, | ||
7298 | -1ULL, | ||
7299 | -1ULL | ||
7300 | } | ||
7301 | }, | ||
7302 | { "sadb_u", TILE_OPC_SADB_U, 0x1 /* pipes */, 3 /* num_operands */, | ||
7303 | TREG_ZERO, /* implicitly_written_register */ | ||
7304 | 1, /* can_bundle */ | ||
7305 | { | ||
7306 | /* operands */ | ||
7307 | { 7, 8, 16 }, | ||
7308 | { 0, }, | ||
7309 | { 0, }, | ||
7310 | { 0, }, | ||
7311 | { 0, } | ||
7312 | }, | ||
7313 | { | ||
7314 | /* fixed_bit_masks */ | ||
7315 | 0x800000007ffc0000ULL, | ||
7316 | 0ULL, | ||
7317 | 0ULL, | ||
7318 | 0ULL, | ||
7319 | 0ULL | ||
7320 | }, | ||
7321 | { | ||
7322 | /* fixed_bit_values */ | ||
7323 | 0x0000000000f40000ULL, | ||
7324 | -1ULL, | ||
7325 | -1ULL, | ||
7326 | -1ULL, | ||
7327 | -1ULL | ||
7328 | } | ||
7329 | }, | ||
7330 | { "sadb_u.sn", TILE_OPC_SADB_U_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
7331 | TREG_SN, /* implicitly_written_register */ | ||
7332 | 1, /* can_bundle */ | ||
7333 | { | ||
7334 | /* operands */ | ||
7335 | { 7, 8, 16 }, | ||
7336 | { 0, }, | ||
7337 | { 0, }, | ||
7338 | { 0, }, | ||
7339 | { 0, } | ||
7340 | }, | ||
7341 | { | ||
7342 | /* fixed_bit_masks */ | ||
7343 | 0x800000007ffc0000ULL, | ||
7344 | 0ULL, | ||
7345 | 0ULL, | ||
7346 | 0ULL, | ||
7347 | 0ULL | ||
7348 | }, | ||
7349 | { | ||
7350 | /* fixed_bit_values */ | ||
7351 | 0x0000000008f40000ULL, | ||
7352 | -1ULL, | ||
7353 | -1ULL, | ||
7354 | -1ULL, | ||
7355 | -1ULL | ||
7356 | } | ||
7357 | }, | ||
7358 | { "sadh", TILE_OPC_SADH, 0x1 /* pipes */, 3 /* num_operands */, | ||
7359 | TREG_ZERO, /* implicitly_written_register */ | ||
7360 | 1, /* can_bundle */ | ||
7361 | { | ||
7362 | /* operands */ | ||
7363 | { 7, 8, 16 }, | ||
7364 | { 0, }, | ||
7365 | { 0, }, | ||
7366 | { 0, }, | ||
7367 | { 0, } | ||
7368 | }, | ||
7369 | { | ||
7370 | /* fixed_bit_masks */ | ||
7371 | 0x800000007ffc0000ULL, | ||
7372 | 0ULL, | ||
7373 | 0ULL, | ||
7374 | 0ULL, | ||
7375 | 0ULL | ||
7376 | }, | ||
7377 | { | ||
7378 | /* fixed_bit_values */ | ||
7379 | 0x0000000000f80000ULL, | ||
7380 | -1ULL, | ||
7381 | -1ULL, | ||
7382 | -1ULL, | ||
7383 | -1ULL | ||
7384 | } | ||
7385 | }, | ||
7386 | { "sadh.sn", TILE_OPC_SADH_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
7387 | TREG_SN, /* implicitly_written_register */ | ||
7388 | 1, /* can_bundle */ | ||
7389 | { | ||
7390 | /* operands */ | ||
7391 | { 7, 8, 16 }, | ||
7392 | { 0, }, | ||
7393 | { 0, }, | ||
7394 | { 0, }, | ||
7395 | { 0, } | ||
7396 | }, | ||
7397 | { | ||
7398 | /* fixed_bit_masks */ | ||
7399 | 0x800000007ffc0000ULL, | ||
7400 | 0ULL, | ||
7401 | 0ULL, | ||
7402 | 0ULL, | ||
7403 | 0ULL | ||
7404 | }, | ||
7405 | { | ||
7406 | /* fixed_bit_values */ | ||
7407 | 0x0000000008f80000ULL, | ||
7408 | -1ULL, | ||
7409 | -1ULL, | ||
7410 | -1ULL, | ||
7411 | -1ULL | ||
7412 | } | ||
7413 | }, | ||
7414 | { "sadh_u", TILE_OPC_SADH_U, 0x1 /* pipes */, 3 /* num_operands */, | ||
7415 | TREG_ZERO, /* implicitly_written_register */ | ||
7416 | 1, /* can_bundle */ | ||
7417 | { | ||
7418 | /* operands */ | ||
7419 | { 7, 8, 16 }, | ||
7420 | { 0, }, | ||
7421 | { 0, }, | ||
7422 | { 0, }, | ||
7423 | { 0, } | ||
7424 | }, | ||
7425 | { | ||
7426 | /* fixed_bit_masks */ | ||
7427 | 0x800000007ffc0000ULL, | ||
7428 | 0ULL, | ||
7429 | 0ULL, | ||
7430 | 0ULL, | ||
7431 | 0ULL | ||
7432 | }, | ||
7433 | { | ||
7434 | /* fixed_bit_values */ | ||
7435 | 0x0000000000fc0000ULL, | ||
7436 | -1ULL, | ||
7437 | -1ULL, | ||
7438 | -1ULL, | ||
7439 | -1ULL | ||
7440 | } | ||
7441 | }, | ||
7442 | { "sadh_u.sn", TILE_OPC_SADH_U_SN, 0x1 /* pipes */, 3 /* num_operands */, | ||
7443 | TREG_SN, /* implicitly_written_register */ | ||
7444 | 1, /* can_bundle */ | ||
7445 | { | ||
7446 | /* operands */ | ||
7447 | { 7, 8, 16 }, | ||
7448 | { 0, }, | ||
7449 | { 0, }, | ||
7450 | { 0, }, | ||
7451 | { 0, } | ||
7452 | }, | ||
7453 | { | ||
7454 | /* fixed_bit_masks */ | ||
7455 | 0x800000007ffc0000ULL, | ||
7456 | 0ULL, | ||
7457 | 0ULL, | ||
7458 | 0ULL, | ||
7459 | 0ULL | ||
7460 | }, | ||
7461 | { | ||
7462 | /* fixed_bit_values */ | ||
7463 | 0x0000000008fc0000ULL, | ||
7464 | -1ULL, | ||
7465 | -1ULL, | ||
7466 | -1ULL, | ||
7467 | -1ULL | ||
7468 | } | ||
7469 | }, | ||
7470 | { "sb", TILE_OPC_SB, 0x12 /* pipes */, 2 /* num_operands */, | ||
7471 | TREG_ZERO, /* implicitly_written_register */ | ||
7472 | 1, /* can_bundle */ | ||
7473 | { | ||
7474 | /* operands */ | ||
7475 | { 0, }, | ||
7476 | { 10, 17 }, | ||
7477 | { 0, }, | ||
7478 | { 0, }, | ||
7479 | { 15, 36 } | ||
7480 | }, | ||
7481 | { | ||
7482 | /* fixed_bit_masks */ | ||
7483 | 0ULL, | ||
7484 | 0xfbfe000000000000ULL, | ||
7485 | 0ULL, | ||
7486 | 0ULL, | ||
7487 | 0x8700000000000000ULL | ||
7488 | }, | ||
7489 | { | ||
7490 | /* fixed_bit_values */ | ||
7491 | -1ULL, | ||
7492 | 0x0840000000000000ULL, | ||
7493 | -1ULL, | ||
7494 | -1ULL, | ||
7495 | 0x8500000000000000ULL | ||
7496 | } | ||
7497 | }, | ||
7498 | { "sbadd", TILE_OPC_SBADD, 0x2 /* pipes */, 3 /* num_operands */, | ||
7499 | TREG_ZERO, /* implicitly_written_register */ | ||
7500 | 1, /* can_bundle */ | ||
7501 | { | ||
7502 | /* operands */ | ||
7503 | { 0, }, | ||
7504 | { 24, 17, 37 }, | ||
7505 | { 0, }, | ||
7506 | { 0, }, | ||
7507 | { 0, } | ||
7508 | }, | ||
7509 | { | ||
7510 | /* fixed_bit_masks */ | ||
7511 | 0ULL, | ||
7512 | 0xfbf8000000000000ULL, | ||
7513 | 0ULL, | ||
7514 | 0ULL, | ||
7515 | 0ULL | ||
7516 | }, | ||
7517 | { | ||
7518 | /* fixed_bit_values */ | ||
7519 | -1ULL, | ||
7520 | 0x30e0000000000000ULL, | ||
7521 | -1ULL, | ||
7522 | -1ULL, | ||
7523 | -1ULL | ||
7524 | } | ||
7525 | }, | ||
7526 | { "seq", TILE_OPC_SEQ, 0xf /* pipes */, 3 /* num_operands */, | ||
7527 | TREG_ZERO, /* implicitly_written_register */ | ||
7528 | 1, /* can_bundle */ | ||
7529 | { | ||
7530 | /* operands */ | ||
7531 | { 7, 8, 16 }, | ||
7532 | { 9, 10, 17 }, | ||
7533 | { 11, 12, 18 }, | ||
7534 | { 13, 14, 19 }, | ||
7535 | { 0, } | ||
7536 | }, | ||
7537 | { | ||
7538 | /* fixed_bit_masks */ | ||
7539 | 0x800000007ffc0000ULL, | ||
7540 | 0xfffe000000000000ULL, | ||
7541 | 0x80000000780c0000ULL, | ||
7542 | 0xf806000000000000ULL, | ||
7543 | 0ULL | ||
7544 | }, | ||
7545 | { | ||
7546 | /* fixed_bit_values */ | ||
7547 | 0x0000000001080000ULL, | ||
7548 | 0x0846000000000000ULL, | ||
7549 | 0x8000000030080000ULL, | ||
7550 | 0xb004000000000000ULL, | ||
7551 | -1ULL | ||
7552 | } | ||
7553 | }, | ||
7554 | { "seq.sn", TILE_OPC_SEQ_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
7555 | TREG_SN, /* implicitly_written_register */ | ||
7556 | 1, /* can_bundle */ | ||
7557 | { | ||
7558 | /* operands */ | ||
7559 | { 7, 8, 16 }, | ||
7560 | { 9, 10, 17 }, | ||
7561 | { 0, }, | ||
7562 | { 0, }, | ||
7563 | { 0, } | ||
7564 | }, | ||
7565 | { | ||
7566 | /* fixed_bit_masks */ | ||
7567 | 0x800000007ffc0000ULL, | ||
7568 | 0xfffe000000000000ULL, | ||
7569 | 0ULL, | ||
7570 | 0ULL, | ||
7571 | 0ULL | ||
7572 | }, | ||
7573 | { | ||
7574 | /* fixed_bit_values */ | ||
7575 | 0x0000000009080000ULL, | ||
7576 | 0x0c46000000000000ULL, | ||
7577 | -1ULL, | ||
7578 | -1ULL, | ||
7579 | -1ULL | ||
7580 | } | ||
7581 | }, | ||
7582 | { "seqb", TILE_OPC_SEQB, 0x3 /* pipes */, 3 /* num_operands */, | ||
7583 | TREG_ZERO, /* implicitly_written_register */ | ||
7584 | 1, /* can_bundle */ | ||
7585 | { | ||
7586 | /* operands */ | ||
7587 | { 7, 8, 16 }, | ||
7588 | { 9, 10, 17 }, | ||
7589 | { 0, }, | ||
7590 | { 0, }, | ||
7591 | { 0, } | ||
7592 | }, | ||
7593 | { | ||
7594 | /* fixed_bit_masks */ | ||
7595 | 0x800000007ffc0000ULL, | ||
7596 | 0xfffe000000000000ULL, | ||
7597 | 0ULL, | ||
7598 | 0ULL, | ||
7599 | 0ULL | ||
7600 | }, | ||
7601 | { | ||
7602 | /* fixed_bit_values */ | ||
7603 | 0x0000000001000000ULL, | ||
7604 | 0x0842000000000000ULL, | ||
7605 | -1ULL, | ||
7606 | -1ULL, | ||
7607 | -1ULL | ||
7608 | } | ||
7609 | }, | ||
7610 | { "seqb.sn", TILE_OPC_SEQB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
7611 | TREG_SN, /* implicitly_written_register */ | ||
7612 | 1, /* can_bundle */ | ||
7613 | { | ||
7614 | /* operands */ | ||
7615 | { 7, 8, 16 }, | ||
7616 | { 9, 10, 17 }, | ||
7617 | { 0, }, | ||
7618 | { 0, }, | ||
7619 | { 0, } | ||
7620 | }, | ||
7621 | { | ||
7622 | /* fixed_bit_masks */ | ||
7623 | 0x800000007ffc0000ULL, | ||
7624 | 0xfffe000000000000ULL, | ||
7625 | 0ULL, | ||
7626 | 0ULL, | ||
7627 | 0ULL | ||
7628 | }, | ||
7629 | { | ||
7630 | /* fixed_bit_values */ | ||
7631 | 0x0000000009000000ULL, | ||
7632 | 0x0c42000000000000ULL, | ||
7633 | -1ULL, | ||
7634 | -1ULL, | ||
7635 | -1ULL | ||
7636 | } | ||
7637 | }, | ||
7638 | { "seqh", TILE_OPC_SEQH, 0x3 /* pipes */, 3 /* num_operands */, | ||
7639 | TREG_ZERO, /* implicitly_written_register */ | ||
7640 | 1, /* can_bundle */ | ||
7641 | { | ||
7642 | /* operands */ | ||
7643 | { 7, 8, 16 }, | ||
7644 | { 9, 10, 17 }, | ||
7645 | { 0, }, | ||
7646 | { 0, }, | ||
7647 | { 0, } | ||
7648 | }, | ||
7649 | { | ||
7650 | /* fixed_bit_masks */ | ||
7651 | 0x800000007ffc0000ULL, | ||
7652 | 0xfffe000000000000ULL, | ||
7653 | 0ULL, | ||
7654 | 0ULL, | ||
7655 | 0ULL | ||
7656 | }, | ||
7657 | { | ||
7658 | /* fixed_bit_values */ | ||
7659 | 0x0000000001040000ULL, | ||
7660 | 0x0844000000000000ULL, | ||
7661 | -1ULL, | ||
7662 | -1ULL, | ||
7663 | -1ULL | ||
7664 | } | ||
7665 | }, | ||
7666 | { "seqh.sn", TILE_OPC_SEQH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
7667 | TREG_SN, /* implicitly_written_register */ | ||
7668 | 1, /* can_bundle */ | ||
7669 | { | ||
7670 | /* operands */ | ||
7671 | { 7, 8, 16 }, | ||
7672 | { 9, 10, 17 }, | ||
7673 | { 0, }, | ||
7674 | { 0, }, | ||
7675 | { 0, } | ||
7676 | }, | ||
7677 | { | ||
7678 | /* fixed_bit_masks */ | ||
7679 | 0x800000007ffc0000ULL, | ||
7680 | 0xfffe000000000000ULL, | ||
7681 | 0ULL, | ||
7682 | 0ULL, | ||
7683 | 0ULL | ||
7684 | }, | ||
7685 | { | ||
7686 | /* fixed_bit_values */ | ||
7687 | 0x0000000009040000ULL, | ||
7688 | 0x0c44000000000000ULL, | ||
7689 | -1ULL, | ||
7690 | -1ULL, | ||
7691 | -1ULL | ||
7692 | } | ||
7693 | }, | ||
7694 | { "seqi", TILE_OPC_SEQI, 0xf /* pipes */, 3 /* num_operands */, | ||
7695 | TREG_ZERO, /* implicitly_written_register */ | ||
7696 | 1, /* can_bundle */ | ||
7697 | { | ||
7698 | /* operands */ | ||
7699 | { 7, 8, 0 }, | ||
7700 | { 9, 10, 1 }, | ||
7701 | { 11, 12, 2 }, | ||
7702 | { 13, 14, 3 }, | ||
7703 | { 0, } | ||
7704 | }, | ||
7705 | { | ||
7706 | /* fixed_bit_masks */ | ||
7707 | 0x800000007ff00000ULL, | ||
7708 | 0xfff8000000000000ULL, | ||
7709 | 0x8000000078000000ULL, | ||
7710 | 0xf800000000000000ULL, | ||
7711 | 0ULL | ||
7712 | }, | ||
7713 | { | ||
7714 | /* fixed_bit_values */ | ||
7715 | 0x0000000040b00000ULL, | ||
7716 | 0x3070000000000000ULL, | ||
7717 | 0x8000000060000000ULL, | ||
7718 | 0xd000000000000000ULL, | ||
7719 | -1ULL | ||
7720 | } | ||
7721 | }, | ||
7722 | { "seqi.sn", TILE_OPC_SEQI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
7723 | TREG_SN, /* implicitly_written_register */ | ||
7724 | 1, /* can_bundle */ | ||
7725 | { | ||
7726 | /* operands */ | ||
7727 | { 7, 8, 0 }, | ||
7728 | { 9, 10, 1 }, | ||
7729 | { 0, }, | ||
7730 | { 0, }, | ||
7731 | { 0, } | ||
7732 | }, | ||
7733 | { | ||
7734 | /* fixed_bit_masks */ | ||
7735 | 0x800000007ff00000ULL, | ||
7736 | 0xfff8000000000000ULL, | ||
7737 | 0ULL, | ||
7738 | 0ULL, | ||
7739 | 0ULL | ||
7740 | }, | ||
7741 | { | ||
7742 | /* fixed_bit_values */ | ||
7743 | 0x0000000048b00000ULL, | ||
7744 | 0x3470000000000000ULL, | ||
7745 | -1ULL, | ||
7746 | -1ULL, | ||
7747 | -1ULL | ||
7748 | } | ||
7749 | }, | ||
7750 | { "seqib", TILE_OPC_SEQIB, 0x3 /* pipes */, 3 /* num_operands */, | ||
7751 | TREG_ZERO, /* implicitly_written_register */ | ||
7752 | 1, /* can_bundle */ | ||
7753 | { | ||
7754 | /* operands */ | ||
7755 | { 7, 8, 0 }, | ||
7756 | { 9, 10, 1 }, | ||
7757 | { 0, }, | ||
7758 | { 0, }, | ||
7759 | { 0, } | ||
7760 | }, | ||
7761 | { | ||
7762 | /* fixed_bit_masks */ | ||
7763 | 0x800000007ff00000ULL, | ||
7764 | 0xfff8000000000000ULL, | ||
7765 | 0ULL, | ||
7766 | 0ULL, | ||
7767 | 0ULL | ||
7768 | }, | ||
7769 | { | ||
7770 | /* fixed_bit_values */ | ||
7771 | 0x0000000040900000ULL, | ||
7772 | 0x3060000000000000ULL, | ||
7773 | -1ULL, | ||
7774 | -1ULL, | ||
7775 | -1ULL | ||
7776 | } | ||
7777 | }, | ||
7778 | { "seqib.sn", TILE_OPC_SEQIB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
7779 | TREG_SN, /* implicitly_written_register */ | ||
7780 | 1, /* can_bundle */ | ||
7781 | { | ||
7782 | /* operands */ | ||
7783 | { 7, 8, 0 }, | ||
7784 | { 9, 10, 1 }, | ||
7785 | { 0, }, | ||
7786 | { 0, }, | ||
7787 | { 0, } | ||
7788 | }, | ||
7789 | { | ||
7790 | /* fixed_bit_masks */ | ||
7791 | 0x800000007ff00000ULL, | ||
7792 | 0xfff8000000000000ULL, | ||
7793 | 0ULL, | ||
7794 | 0ULL, | ||
7795 | 0ULL | ||
7796 | }, | ||
7797 | { | ||
7798 | /* fixed_bit_values */ | ||
7799 | 0x0000000048900000ULL, | ||
7800 | 0x3460000000000000ULL, | ||
7801 | -1ULL, | ||
7802 | -1ULL, | ||
7803 | -1ULL | ||
7804 | } | ||
7805 | }, | ||
7806 | { "seqih", TILE_OPC_SEQIH, 0x3 /* pipes */, 3 /* num_operands */, | ||
7807 | TREG_ZERO, /* implicitly_written_register */ | ||
7808 | 1, /* can_bundle */ | ||
7809 | { | ||
7810 | /* operands */ | ||
7811 | { 7, 8, 0 }, | ||
7812 | { 9, 10, 1 }, | ||
7813 | { 0, }, | ||
7814 | { 0, }, | ||
7815 | { 0, } | ||
7816 | }, | ||
7817 | { | ||
7818 | /* fixed_bit_masks */ | ||
7819 | 0x800000007ff00000ULL, | ||
7820 | 0xfff8000000000000ULL, | ||
7821 | 0ULL, | ||
7822 | 0ULL, | ||
7823 | 0ULL | ||
7824 | }, | ||
7825 | { | ||
7826 | /* fixed_bit_values */ | ||
7827 | 0x0000000040a00000ULL, | ||
7828 | 0x3068000000000000ULL, | ||
7829 | -1ULL, | ||
7830 | -1ULL, | ||
7831 | -1ULL | ||
7832 | } | ||
7833 | }, | ||
7834 | { "seqih.sn", TILE_OPC_SEQIH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
7835 | TREG_SN, /* implicitly_written_register */ | ||
7836 | 1, /* can_bundle */ | ||
7837 | { | ||
7838 | /* operands */ | ||
7839 | { 7, 8, 0 }, | ||
7840 | { 9, 10, 1 }, | ||
7841 | { 0, }, | ||
7842 | { 0, }, | ||
7843 | { 0, } | ||
7844 | }, | ||
7845 | { | ||
7846 | /* fixed_bit_masks */ | ||
7847 | 0x800000007ff00000ULL, | ||
7848 | 0xfff8000000000000ULL, | ||
7849 | 0ULL, | ||
7850 | 0ULL, | ||
7851 | 0ULL | ||
7852 | }, | ||
7853 | { | ||
7854 | /* fixed_bit_values */ | ||
7855 | 0x0000000048a00000ULL, | ||
7856 | 0x3468000000000000ULL, | ||
7857 | -1ULL, | ||
7858 | -1ULL, | ||
7859 | -1ULL | ||
7860 | } | ||
7861 | }, | ||
7862 | { "sh", TILE_OPC_SH, 0x12 /* pipes */, 2 /* num_operands */, | ||
7863 | TREG_ZERO, /* implicitly_written_register */ | ||
7864 | 1, /* can_bundle */ | ||
7865 | { | ||
7866 | /* operands */ | ||
7867 | { 0, }, | ||
7868 | { 10, 17 }, | ||
7869 | { 0, }, | ||
7870 | { 0, }, | ||
7871 | { 15, 36 } | ||
7872 | }, | ||
7873 | { | ||
7874 | /* fixed_bit_masks */ | ||
7875 | 0ULL, | ||
7876 | 0xfbfe000000000000ULL, | ||
7877 | 0ULL, | ||
7878 | 0ULL, | ||
7879 | 0x8700000000000000ULL | ||
7880 | }, | ||
7881 | { | ||
7882 | /* fixed_bit_values */ | ||
7883 | -1ULL, | ||
7884 | 0x0854000000000000ULL, | ||
7885 | -1ULL, | ||
7886 | -1ULL, | ||
7887 | 0x8600000000000000ULL | ||
7888 | } | ||
7889 | }, | ||
7890 | { "shadd", TILE_OPC_SHADD, 0x2 /* pipes */, 3 /* num_operands */, | ||
7891 | TREG_ZERO, /* implicitly_written_register */ | ||
7892 | 1, /* can_bundle */ | ||
7893 | { | ||
7894 | /* operands */ | ||
7895 | { 0, }, | ||
7896 | { 24, 17, 37 }, | ||
7897 | { 0, }, | ||
7898 | { 0, }, | ||
7899 | { 0, } | ||
7900 | }, | ||
7901 | { | ||
7902 | /* fixed_bit_masks */ | ||
7903 | 0ULL, | ||
7904 | 0xfbf8000000000000ULL, | ||
7905 | 0ULL, | ||
7906 | 0ULL, | ||
7907 | 0ULL | ||
7908 | }, | ||
7909 | { | ||
7910 | /* fixed_bit_values */ | ||
7911 | -1ULL, | ||
7912 | 0x30e8000000000000ULL, | ||
7913 | -1ULL, | ||
7914 | -1ULL, | ||
7915 | -1ULL | ||
7916 | } | ||
7917 | }, | ||
7918 | { "shl", TILE_OPC_SHL, 0xf /* pipes */, 3 /* num_operands */, | ||
7919 | TREG_ZERO, /* implicitly_written_register */ | ||
7920 | 1, /* can_bundle */ | ||
7921 | { | ||
7922 | /* operands */ | ||
7923 | { 7, 8, 16 }, | ||
7924 | { 9, 10, 17 }, | ||
7925 | { 11, 12, 18 }, | ||
7926 | { 13, 14, 19 }, | ||
7927 | { 0, } | ||
7928 | }, | ||
7929 | { | ||
7930 | /* fixed_bit_masks */ | ||
7931 | 0x800000007ffc0000ULL, | ||
7932 | 0xfffe000000000000ULL, | ||
7933 | 0x80000000780c0000ULL, | ||
7934 | 0xf806000000000000ULL, | ||
7935 | 0ULL | ||
7936 | }, | ||
7937 | { | ||
7938 | /* fixed_bit_values */ | ||
7939 | 0x0000000001140000ULL, | ||
7940 | 0x084c000000000000ULL, | ||
7941 | 0x8000000020040000ULL, | ||
7942 | 0xa002000000000000ULL, | ||
7943 | -1ULL | ||
7944 | } | ||
7945 | }, | ||
7946 | { "shl.sn", TILE_OPC_SHL_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
7947 | TREG_SN, /* implicitly_written_register */ | ||
7948 | 1, /* can_bundle */ | ||
7949 | { | ||
7950 | /* operands */ | ||
7951 | { 7, 8, 16 }, | ||
7952 | { 9, 10, 17 }, | ||
7953 | { 0, }, | ||
7954 | { 0, }, | ||
7955 | { 0, } | ||
7956 | }, | ||
7957 | { | ||
7958 | /* fixed_bit_masks */ | ||
7959 | 0x800000007ffc0000ULL, | ||
7960 | 0xfffe000000000000ULL, | ||
7961 | 0ULL, | ||
7962 | 0ULL, | ||
7963 | 0ULL | ||
7964 | }, | ||
7965 | { | ||
7966 | /* fixed_bit_values */ | ||
7967 | 0x0000000009140000ULL, | ||
7968 | 0x0c4c000000000000ULL, | ||
7969 | -1ULL, | ||
7970 | -1ULL, | ||
7971 | -1ULL | ||
7972 | } | ||
7973 | }, | ||
7974 | { "shlb", TILE_OPC_SHLB, 0x3 /* pipes */, 3 /* num_operands */, | ||
7975 | TREG_ZERO, /* implicitly_written_register */ | ||
7976 | 1, /* can_bundle */ | ||
7977 | { | ||
7978 | /* operands */ | ||
7979 | { 7, 8, 16 }, | ||
7980 | { 9, 10, 17 }, | ||
7981 | { 0, }, | ||
7982 | { 0, }, | ||
7983 | { 0, } | ||
7984 | }, | ||
7985 | { | ||
7986 | /* fixed_bit_masks */ | ||
7987 | 0x800000007ffc0000ULL, | ||
7988 | 0xfffe000000000000ULL, | ||
7989 | 0ULL, | ||
7990 | 0ULL, | ||
7991 | 0ULL | ||
7992 | }, | ||
7993 | { | ||
7994 | /* fixed_bit_values */ | ||
7995 | 0x00000000010c0000ULL, | ||
7996 | 0x0848000000000000ULL, | ||
7997 | -1ULL, | ||
7998 | -1ULL, | ||
7999 | -1ULL | ||
8000 | } | ||
8001 | }, | ||
8002 | { "shlb.sn", TILE_OPC_SHLB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8003 | TREG_SN, /* implicitly_written_register */ | ||
8004 | 1, /* can_bundle */ | ||
8005 | { | ||
8006 | /* operands */ | ||
8007 | { 7, 8, 16 }, | ||
8008 | { 9, 10, 17 }, | ||
8009 | { 0, }, | ||
8010 | { 0, }, | ||
8011 | { 0, } | ||
8012 | }, | ||
8013 | { | ||
8014 | /* fixed_bit_masks */ | ||
8015 | 0x800000007ffc0000ULL, | ||
8016 | 0xfffe000000000000ULL, | ||
8017 | 0ULL, | ||
8018 | 0ULL, | ||
8019 | 0ULL | ||
8020 | }, | ||
8021 | { | ||
8022 | /* fixed_bit_values */ | ||
8023 | 0x00000000090c0000ULL, | ||
8024 | 0x0c48000000000000ULL, | ||
8025 | -1ULL, | ||
8026 | -1ULL, | ||
8027 | -1ULL | ||
8028 | } | ||
8029 | }, | ||
8030 | { "shlh", TILE_OPC_SHLH, 0x3 /* pipes */, 3 /* num_operands */, | ||
8031 | TREG_ZERO, /* implicitly_written_register */ | ||
8032 | 1, /* can_bundle */ | ||
8033 | { | ||
8034 | /* operands */ | ||
8035 | { 7, 8, 16 }, | ||
8036 | { 9, 10, 17 }, | ||
8037 | { 0, }, | ||
8038 | { 0, }, | ||
8039 | { 0, } | ||
8040 | }, | ||
8041 | { | ||
8042 | /* fixed_bit_masks */ | ||
8043 | 0x800000007ffc0000ULL, | ||
8044 | 0xfffe000000000000ULL, | ||
8045 | 0ULL, | ||
8046 | 0ULL, | ||
8047 | 0ULL | ||
8048 | }, | ||
8049 | { | ||
8050 | /* fixed_bit_values */ | ||
8051 | 0x0000000001100000ULL, | ||
8052 | 0x084a000000000000ULL, | ||
8053 | -1ULL, | ||
8054 | -1ULL, | ||
8055 | -1ULL | ||
8056 | } | ||
8057 | }, | ||
8058 | { "shlh.sn", TILE_OPC_SHLH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8059 | TREG_SN, /* implicitly_written_register */ | ||
8060 | 1, /* can_bundle */ | ||
8061 | { | ||
8062 | /* operands */ | ||
8063 | { 7, 8, 16 }, | ||
8064 | { 9, 10, 17 }, | ||
8065 | { 0, }, | ||
8066 | { 0, }, | ||
8067 | { 0, } | ||
8068 | }, | ||
8069 | { | ||
8070 | /* fixed_bit_masks */ | ||
8071 | 0x800000007ffc0000ULL, | ||
8072 | 0xfffe000000000000ULL, | ||
8073 | 0ULL, | ||
8074 | 0ULL, | ||
8075 | 0ULL | ||
8076 | }, | ||
8077 | { | ||
8078 | /* fixed_bit_values */ | ||
8079 | 0x0000000009100000ULL, | ||
8080 | 0x0c4a000000000000ULL, | ||
8081 | -1ULL, | ||
8082 | -1ULL, | ||
8083 | -1ULL | ||
8084 | } | ||
8085 | }, | ||
8086 | { "shli", TILE_OPC_SHLI, 0xf /* pipes */, 3 /* num_operands */, | ||
8087 | TREG_ZERO, /* implicitly_written_register */ | ||
8088 | 1, /* can_bundle */ | ||
8089 | { | ||
8090 | /* operands */ | ||
8091 | { 7, 8, 32 }, | ||
8092 | { 9, 10, 33 }, | ||
8093 | { 11, 12, 34 }, | ||
8094 | { 13, 14, 35 }, | ||
8095 | { 0, } | ||
8096 | }, | ||
8097 | { | ||
8098 | /* fixed_bit_masks */ | ||
8099 | 0x800000007ffe0000ULL, | ||
8100 | 0xffff000000000000ULL, | ||
8101 | 0x80000000780e0000ULL, | ||
8102 | 0xf807000000000000ULL, | ||
8103 | 0ULL | ||
8104 | }, | ||
8105 | { | ||
8106 | /* fixed_bit_values */ | ||
8107 | 0x0000000070080000ULL, | ||
8108 | 0x4004000000000000ULL, | ||
8109 | 0x8000000068040000ULL, | ||
8110 | 0xd802000000000000ULL, | ||
8111 | -1ULL | ||
8112 | } | ||
8113 | }, | ||
8114 | { "shli.sn", TILE_OPC_SHLI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8115 | TREG_SN, /* implicitly_written_register */ | ||
8116 | 1, /* can_bundle */ | ||
8117 | { | ||
8118 | /* operands */ | ||
8119 | { 7, 8, 32 }, | ||
8120 | { 9, 10, 33 }, | ||
8121 | { 0, }, | ||
8122 | { 0, }, | ||
8123 | { 0, } | ||
8124 | }, | ||
8125 | { | ||
8126 | /* fixed_bit_masks */ | ||
8127 | 0x800000007ffe0000ULL, | ||
8128 | 0xffff000000000000ULL, | ||
8129 | 0ULL, | ||
8130 | 0ULL, | ||
8131 | 0ULL | ||
8132 | }, | ||
8133 | { | ||
8134 | /* fixed_bit_values */ | ||
8135 | 0x0000000078080000ULL, | ||
8136 | 0x4404000000000000ULL, | ||
8137 | -1ULL, | ||
8138 | -1ULL, | ||
8139 | -1ULL | ||
8140 | } | ||
8141 | }, | ||
8142 | { "shlib", TILE_OPC_SHLIB, 0x3 /* pipes */, 3 /* num_operands */, | ||
8143 | TREG_ZERO, /* implicitly_written_register */ | ||
8144 | 1, /* can_bundle */ | ||
8145 | { | ||
8146 | /* operands */ | ||
8147 | { 7, 8, 32 }, | ||
8148 | { 9, 10, 33 }, | ||
8149 | { 0, }, | ||
8150 | { 0, }, | ||
8151 | { 0, } | ||
8152 | }, | ||
8153 | { | ||
8154 | /* fixed_bit_masks */ | ||
8155 | 0x800000007ffe0000ULL, | ||
8156 | 0xffff000000000000ULL, | ||
8157 | 0ULL, | ||
8158 | 0ULL, | ||
8159 | 0ULL | ||
8160 | }, | ||
8161 | { | ||
8162 | /* fixed_bit_values */ | ||
8163 | 0x0000000070040000ULL, | ||
8164 | 0x4002000000000000ULL, | ||
8165 | -1ULL, | ||
8166 | -1ULL, | ||
8167 | -1ULL | ||
8168 | } | ||
8169 | }, | ||
8170 | { "shlib.sn", TILE_OPC_SHLIB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8171 | TREG_SN, /* implicitly_written_register */ | ||
8172 | 1, /* can_bundle */ | ||
8173 | { | ||
8174 | /* operands */ | ||
8175 | { 7, 8, 32 }, | ||
8176 | { 9, 10, 33 }, | ||
8177 | { 0, }, | ||
8178 | { 0, }, | ||
8179 | { 0, } | ||
8180 | }, | ||
8181 | { | ||
8182 | /* fixed_bit_masks */ | ||
8183 | 0x800000007ffe0000ULL, | ||
8184 | 0xffff000000000000ULL, | ||
8185 | 0ULL, | ||
8186 | 0ULL, | ||
8187 | 0ULL | ||
8188 | }, | ||
8189 | { | ||
8190 | /* fixed_bit_values */ | ||
8191 | 0x0000000078040000ULL, | ||
8192 | 0x4402000000000000ULL, | ||
8193 | -1ULL, | ||
8194 | -1ULL, | ||
8195 | -1ULL | ||
8196 | } | ||
8197 | }, | ||
8198 | { "shlih", TILE_OPC_SHLIH, 0x3 /* pipes */, 3 /* num_operands */, | ||
8199 | TREG_ZERO, /* implicitly_written_register */ | ||
8200 | 1, /* can_bundle */ | ||
8201 | { | ||
8202 | /* operands */ | ||
8203 | { 7, 8, 32 }, | ||
8204 | { 9, 10, 33 }, | ||
8205 | { 0, }, | ||
8206 | { 0, }, | ||
8207 | { 0, } | ||
8208 | }, | ||
8209 | { | ||
8210 | /* fixed_bit_masks */ | ||
8211 | 0x800000007ffe0000ULL, | ||
8212 | 0xffff000000000000ULL, | ||
8213 | 0ULL, | ||
8214 | 0ULL, | ||
8215 | 0ULL | ||
8216 | }, | ||
8217 | { | ||
8218 | /* fixed_bit_values */ | ||
8219 | 0x0000000070060000ULL, | ||
8220 | 0x4003000000000000ULL, | ||
8221 | -1ULL, | ||
8222 | -1ULL, | ||
8223 | -1ULL | ||
8224 | } | ||
8225 | }, | ||
8226 | { "shlih.sn", TILE_OPC_SHLIH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8227 | TREG_SN, /* implicitly_written_register */ | ||
8228 | 1, /* can_bundle */ | ||
8229 | { | ||
8230 | /* operands */ | ||
8231 | { 7, 8, 32 }, | ||
8232 | { 9, 10, 33 }, | ||
8233 | { 0, }, | ||
8234 | { 0, }, | ||
8235 | { 0, } | ||
8236 | }, | ||
8237 | { | ||
8238 | /* fixed_bit_masks */ | ||
8239 | 0x800000007ffe0000ULL, | ||
8240 | 0xffff000000000000ULL, | ||
8241 | 0ULL, | ||
8242 | 0ULL, | ||
8243 | 0ULL | ||
8244 | }, | ||
8245 | { | ||
8246 | /* fixed_bit_values */ | ||
8247 | 0x0000000078060000ULL, | ||
8248 | 0x4403000000000000ULL, | ||
8249 | -1ULL, | ||
8250 | -1ULL, | ||
8251 | -1ULL | ||
8252 | } | ||
8253 | }, | ||
8254 | { "shr", TILE_OPC_SHR, 0xf /* pipes */, 3 /* num_operands */, | ||
8255 | TREG_ZERO, /* implicitly_written_register */ | ||
8256 | 1, /* can_bundle */ | ||
8257 | { | ||
8258 | /* operands */ | ||
8259 | { 7, 8, 16 }, | ||
8260 | { 9, 10, 17 }, | ||
8261 | { 11, 12, 18 }, | ||
8262 | { 13, 14, 19 }, | ||
8263 | { 0, } | ||
8264 | }, | ||
8265 | { | ||
8266 | /* fixed_bit_masks */ | ||
8267 | 0x800000007ffc0000ULL, | ||
8268 | 0xfffe000000000000ULL, | ||
8269 | 0x80000000780c0000ULL, | ||
8270 | 0xf806000000000000ULL, | ||
8271 | 0ULL | ||
8272 | }, | ||
8273 | { | ||
8274 | /* fixed_bit_values */ | ||
8275 | 0x0000000001200000ULL, | ||
8276 | 0x0852000000000000ULL, | ||
8277 | 0x8000000020080000ULL, | ||
8278 | 0xa004000000000000ULL, | ||
8279 | -1ULL | ||
8280 | } | ||
8281 | }, | ||
8282 | { "shr.sn", TILE_OPC_SHR_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8283 | TREG_SN, /* implicitly_written_register */ | ||
8284 | 1, /* can_bundle */ | ||
8285 | { | ||
8286 | /* operands */ | ||
8287 | { 7, 8, 16 }, | ||
8288 | { 9, 10, 17 }, | ||
8289 | { 0, }, | ||
8290 | { 0, }, | ||
8291 | { 0, } | ||
8292 | }, | ||
8293 | { | ||
8294 | /* fixed_bit_masks */ | ||
8295 | 0x800000007ffc0000ULL, | ||
8296 | 0xfffe000000000000ULL, | ||
8297 | 0ULL, | ||
8298 | 0ULL, | ||
8299 | 0ULL | ||
8300 | }, | ||
8301 | { | ||
8302 | /* fixed_bit_values */ | ||
8303 | 0x0000000009200000ULL, | ||
8304 | 0x0c52000000000000ULL, | ||
8305 | -1ULL, | ||
8306 | -1ULL, | ||
8307 | -1ULL | ||
8308 | } | ||
8309 | }, | ||
8310 | { "shrb", TILE_OPC_SHRB, 0x3 /* pipes */, 3 /* num_operands */, | ||
8311 | TREG_ZERO, /* implicitly_written_register */ | ||
8312 | 1, /* can_bundle */ | ||
8313 | { | ||
8314 | /* operands */ | ||
8315 | { 7, 8, 16 }, | ||
8316 | { 9, 10, 17 }, | ||
8317 | { 0, }, | ||
8318 | { 0, }, | ||
8319 | { 0, } | ||
8320 | }, | ||
8321 | { | ||
8322 | /* fixed_bit_masks */ | ||
8323 | 0x800000007ffc0000ULL, | ||
8324 | 0xfffe000000000000ULL, | ||
8325 | 0ULL, | ||
8326 | 0ULL, | ||
8327 | 0ULL | ||
8328 | }, | ||
8329 | { | ||
8330 | /* fixed_bit_values */ | ||
8331 | 0x0000000001180000ULL, | ||
8332 | 0x084e000000000000ULL, | ||
8333 | -1ULL, | ||
8334 | -1ULL, | ||
8335 | -1ULL | ||
8336 | } | ||
8337 | }, | ||
8338 | { "shrb.sn", TILE_OPC_SHRB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8339 | TREG_SN, /* implicitly_written_register */ | ||
8340 | 1, /* can_bundle */ | ||
8341 | { | ||
8342 | /* operands */ | ||
8343 | { 7, 8, 16 }, | ||
8344 | { 9, 10, 17 }, | ||
8345 | { 0, }, | ||
8346 | { 0, }, | ||
8347 | { 0, } | ||
8348 | }, | ||
8349 | { | ||
8350 | /* fixed_bit_masks */ | ||
8351 | 0x800000007ffc0000ULL, | ||
8352 | 0xfffe000000000000ULL, | ||
8353 | 0ULL, | ||
8354 | 0ULL, | ||
8355 | 0ULL | ||
8356 | }, | ||
8357 | { | ||
8358 | /* fixed_bit_values */ | ||
8359 | 0x0000000009180000ULL, | ||
8360 | 0x0c4e000000000000ULL, | ||
8361 | -1ULL, | ||
8362 | -1ULL, | ||
8363 | -1ULL | ||
8364 | } | ||
8365 | }, | ||
8366 | { "shrh", TILE_OPC_SHRH, 0x3 /* pipes */, 3 /* num_operands */, | ||
8367 | TREG_ZERO, /* implicitly_written_register */ | ||
8368 | 1, /* can_bundle */ | ||
8369 | { | ||
8370 | /* operands */ | ||
8371 | { 7, 8, 16 }, | ||
8372 | { 9, 10, 17 }, | ||
8373 | { 0, }, | ||
8374 | { 0, }, | ||
8375 | { 0, } | ||
8376 | }, | ||
8377 | { | ||
8378 | /* fixed_bit_masks */ | ||
8379 | 0x800000007ffc0000ULL, | ||
8380 | 0xfffe000000000000ULL, | ||
8381 | 0ULL, | ||
8382 | 0ULL, | ||
8383 | 0ULL | ||
8384 | }, | ||
8385 | { | ||
8386 | /* fixed_bit_values */ | ||
8387 | 0x00000000011c0000ULL, | ||
8388 | 0x0850000000000000ULL, | ||
8389 | -1ULL, | ||
8390 | -1ULL, | ||
8391 | -1ULL | ||
8392 | } | ||
8393 | }, | ||
8394 | { "shrh.sn", TILE_OPC_SHRH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8395 | TREG_SN, /* implicitly_written_register */ | ||
8396 | 1, /* can_bundle */ | ||
8397 | { | ||
8398 | /* operands */ | ||
8399 | { 7, 8, 16 }, | ||
8400 | { 9, 10, 17 }, | ||
8401 | { 0, }, | ||
8402 | { 0, }, | ||
8403 | { 0, } | ||
8404 | }, | ||
8405 | { | ||
8406 | /* fixed_bit_masks */ | ||
8407 | 0x800000007ffc0000ULL, | ||
8408 | 0xfffe000000000000ULL, | ||
8409 | 0ULL, | ||
8410 | 0ULL, | ||
8411 | 0ULL | ||
8412 | }, | ||
8413 | { | ||
8414 | /* fixed_bit_values */ | ||
8415 | 0x00000000091c0000ULL, | ||
8416 | 0x0c50000000000000ULL, | ||
8417 | -1ULL, | ||
8418 | -1ULL, | ||
8419 | -1ULL | ||
8420 | } | ||
8421 | }, | ||
8422 | { "shri", TILE_OPC_SHRI, 0xf /* pipes */, 3 /* num_operands */, | ||
8423 | TREG_ZERO, /* implicitly_written_register */ | ||
8424 | 1, /* can_bundle */ | ||
8425 | { | ||
8426 | /* operands */ | ||
8427 | { 7, 8, 32 }, | ||
8428 | { 9, 10, 33 }, | ||
8429 | { 11, 12, 34 }, | ||
8430 | { 13, 14, 35 }, | ||
8431 | { 0, } | ||
8432 | }, | ||
8433 | { | ||
8434 | /* fixed_bit_masks */ | ||
8435 | 0x800000007ffe0000ULL, | ||
8436 | 0xffff000000000000ULL, | ||
8437 | 0x80000000780e0000ULL, | ||
8438 | 0xf807000000000000ULL, | ||
8439 | 0ULL | ||
8440 | }, | ||
8441 | { | ||
8442 | /* fixed_bit_values */ | ||
8443 | 0x00000000700e0000ULL, | ||
8444 | 0x4007000000000000ULL, | ||
8445 | 0x8000000068060000ULL, | ||
8446 | 0xd803000000000000ULL, | ||
8447 | -1ULL | ||
8448 | } | ||
8449 | }, | ||
8450 | { "shri.sn", TILE_OPC_SHRI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8451 | TREG_SN, /* implicitly_written_register */ | ||
8452 | 1, /* can_bundle */ | ||
8453 | { | ||
8454 | /* operands */ | ||
8455 | { 7, 8, 32 }, | ||
8456 | { 9, 10, 33 }, | ||
8457 | { 0, }, | ||
8458 | { 0, }, | ||
8459 | { 0, } | ||
8460 | }, | ||
8461 | { | ||
8462 | /* fixed_bit_masks */ | ||
8463 | 0x800000007ffe0000ULL, | ||
8464 | 0xffff000000000000ULL, | ||
8465 | 0ULL, | ||
8466 | 0ULL, | ||
8467 | 0ULL | ||
8468 | }, | ||
8469 | { | ||
8470 | /* fixed_bit_values */ | ||
8471 | 0x00000000780e0000ULL, | ||
8472 | 0x4407000000000000ULL, | ||
8473 | -1ULL, | ||
8474 | -1ULL, | ||
8475 | -1ULL | ||
8476 | } | ||
8477 | }, | ||
8478 | { "shrib", TILE_OPC_SHRIB, 0x3 /* pipes */, 3 /* num_operands */, | ||
8479 | TREG_ZERO, /* implicitly_written_register */ | ||
8480 | 1, /* can_bundle */ | ||
8481 | { | ||
8482 | /* operands */ | ||
8483 | { 7, 8, 32 }, | ||
8484 | { 9, 10, 33 }, | ||
8485 | { 0, }, | ||
8486 | { 0, }, | ||
8487 | { 0, } | ||
8488 | }, | ||
8489 | { | ||
8490 | /* fixed_bit_masks */ | ||
8491 | 0x800000007ffe0000ULL, | ||
8492 | 0xffff000000000000ULL, | ||
8493 | 0ULL, | ||
8494 | 0ULL, | ||
8495 | 0ULL | ||
8496 | }, | ||
8497 | { | ||
8498 | /* fixed_bit_values */ | ||
8499 | 0x00000000700a0000ULL, | ||
8500 | 0x4005000000000000ULL, | ||
8501 | -1ULL, | ||
8502 | -1ULL, | ||
8503 | -1ULL | ||
8504 | } | ||
8505 | }, | ||
8506 | { "shrib.sn", TILE_OPC_SHRIB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8507 | TREG_SN, /* implicitly_written_register */ | ||
8508 | 1, /* can_bundle */ | ||
8509 | { | ||
8510 | /* operands */ | ||
8511 | { 7, 8, 32 }, | ||
8512 | { 9, 10, 33 }, | ||
8513 | { 0, }, | ||
8514 | { 0, }, | ||
8515 | { 0, } | ||
8516 | }, | ||
8517 | { | ||
8518 | /* fixed_bit_masks */ | ||
8519 | 0x800000007ffe0000ULL, | ||
8520 | 0xffff000000000000ULL, | ||
8521 | 0ULL, | ||
8522 | 0ULL, | ||
8523 | 0ULL | ||
8524 | }, | ||
8525 | { | ||
8526 | /* fixed_bit_values */ | ||
8527 | 0x00000000780a0000ULL, | ||
8528 | 0x4405000000000000ULL, | ||
8529 | -1ULL, | ||
8530 | -1ULL, | ||
8531 | -1ULL | ||
8532 | } | ||
8533 | }, | ||
8534 | { "shrih", TILE_OPC_SHRIH, 0x3 /* pipes */, 3 /* num_operands */, | ||
8535 | TREG_ZERO, /* implicitly_written_register */ | ||
8536 | 1, /* can_bundle */ | ||
8537 | { | ||
8538 | /* operands */ | ||
8539 | { 7, 8, 32 }, | ||
8540 | { 9, 10, 33 }, | ||
8541 | { 0, }, | ||
8542 | { 0, }, | ||
8543 | { 0, } | ||
8544 | }, | ||
8545 | { | ||
8546 | /* fixed_bit_masks */ | ||
8547 | 0x800000007ffe0000ULL, | ||
8548 | 0xffff000000000000ULL, | ||
8549 | 0ULL, | ||
8550 | 0ULL, | ||
8551 | 0ULL | ||
8552 | }, | ||
8553 | { | ||
8554 | /* fixed_bit_values */ | ||
8555 | 0x00000000700c0000ULL, | ||
8556 | 0x4006000000000000ULL, | ||
8557 | -1ULL, | ||
8558 | -1ULL, | ||
8559 | -1ULL | ||
8560 | } | ||
8561 | }, | ||
8562 | { "shrih.sn", TILE_OPC_SHRIH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8563 | TREG_SN, /* implicitly_written_register */ | ||
8564 | 1, /* can_bundle */ | ||
8565 | { | ||
8566 | /* operands */ | ||
8567 | { 7, 8, 32 }, | ||
8568 | { 9, 10, 33 }, | ||
8569 | { 0, }, | ||
8570 | { 0, }, | ||
8571 | { 0, } | ||
8572 | }, | ||
8573 | { | ||
8574 | /* fixed_bit_masks */ | ||
8575 | 0x800000007ffe0000ULL, | ||
8576 | 0xffff000000000000ULL, | ||
8577 | 0ULL, | ||
8578 | 0ULL, | ||
8579 | 0ULL | ||
8580 | }, | ||
8581 | { | ||
8582 | /* fixed_bit_values */ | ||
8583 | 0x00000000780c0000ULL, | ||
8584 | 0x4406000000000000ULL, | ||
8585 | -1ULL, | ||
8586 | -1ULL, | ||
8587 | -1ULL | ||
8588 | } | ||
8589 | }, | ||
8590 | { "slt", TILE_OPC_SLT, 0xf /* pipes */, 3 /* num_operands */, | ||
8591 | TREG_ZERO, /* implicitly_written_register */ | ||
8592 | 1, /* can_bundle */ | ||
8593 | { | ||
8594 | /* operands */ | ||
8595 | { 7, 8, 16 }, | ||
8596 | { 9, 10, 17 }, | ||
8597 | { 11, 12, 18 }, | ||
8598 | { 13, 14, 19 }, | ||
8599 | { 0, } | ||
8600 | }, | ||
8601 | { | ||
8602 | /* fixed_bit_masks */ | ||
8603 | 0x800000007ffc0000ULL, | ||
8604 | 0xfffe000000000000ULL, | ||
8605 | 0x80000000780c0000ULL, | ||
8606 | 0xf806000000000000ULL, | ||
8607 | 0ULL | ||
8608 | }, | ||
8609 | { | ||
8610 | /* fixed_bit_values */ | ||
8611 | 0x00000000014c0000ULL, | ||
8612 | 0x086a000000000000ULL, | ||
8613 | 0x8000000028080000ULL, | ||
8614 | 0xa804000000000000ULL, | ||
8615 | -1ULL | ||
8616 | } | ||
8617 | }, | ||
8618 | { "slt.sn", TILE_OPC_SLT_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8619 | TREG_SN, /* implicitly_written_register */ | ||
8620 | 1, /* can_bundle */ | ||
8621 | { | ||
8622 | /* operands */ | ||
8623 | { 7, 8, 16 }, | ||
8624 | { 9, 10, 17 }, | ||
8625 | { 0, }, | ||
8626 | { 0, }, | ||
8627 | { 0, } | ||
8628 | }, | ||
8629 | { | ||
8630 | /* fixed_bit_masks */ | ||
8631 | 0x800000007ffc0000ULL, | ||
8632 | 0xfffe000000000000ULL, | ||
8633 | 0ULL, | ||
8634 | 0ULL, | ||
8635 | 0ULL | ||
8636 | }, | ||
8637 | { | ||
8638 | /* fixed_bit_values */ | ||
8639 | 0x00000000094c0000ULL, | ||
8640 | 0x0c6a000000000000ULL, | ||
8641 | -1ULL, | ||
8642 | -1ULL, | ||
8643 | -1ULL | ||
8644 | } | ||
8645 | }, | ||
8646 | { "slt_u", TILE_OPC_SLT_U, 0xf /* pipes */, 3 /* num_operands */, | ||
8647 | TREG_ZERO, /* implicitly_written_register */ | ||
8648 | 1, /* can_bundle */ | ||
8649 | { | ||
8650 | /* operands */ | ||
8651 | { 7, 8, 16 }, | ||
8652 | { 9, 10, 17 }, | ||
8653 | { 11, 12, 18 }, | ||
8654 | { 13, 14, 19 }, | ||
8655 | { 0, } | ||
8656 | }, | ||
8657 | { | ||
8658 | /* fixed_bit_masks */ | ||
8659 | 0x800000007ffc0000ULL, | ||
8660 | 0xfffe000000000000ULL, | ||
8661 | 0x80000000780c0000ULL, | ||
8662 | 0xf806000000000000ULL, | ||
8663 | 0ULL | ||
8664 | }, | ||
8665 | { | ||
8666 | /* fixed_bit_values */ | ||
8667 | 0x0000000001500000ULL, | ||
8668 | 0x086c000000000000ULL, | ||
8669 | 0x80000000280c0000ULL, | ||
8670 | 0xa806000000000000ULL, | ||
8671 | -1ULL | ||
8672 | } | ||
8673 | }, | ||
8674 | { "slt_u.sn", TILE_OPC_SLT_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8675 | TREG_SN, /* implicitly_written_register */ | ||
8676 | 1, /* can_bundle */ | ||
8677 | { | ||
8678 | /* operands */ | ||
8679 | { 7, 8, 16 }, | ||
8680 | { 9, 10, 17 }, | ||
8681 | { 0, }, | ||
8682 | { 0, }, | ||
8683 | { 0, } | ||
8684 | }, | ||
8685 | { | ||
8686 | /* fixed_bit_masks */ | ||
8687 | 0x800000007ffc0000ULL, | ||
8688 | 0xfffe000000000000ULL, | ||
8689 | 0ULL, | ||
8690 | 0ULL, | ||
8691 | 0ULL | ||
8692 | }, | ||
8693 | { | ||
8694 | /* fixed_bit_values */ | ||
8695 | 0x0000000009500000ULL, | ||
8696 | 0x0c6c000000000000ULL, | ||
8697 | -1ULL, | ||
8698 | -1ULL, | ||
8699 | -1ULL | ||
8700 | } | ||
8701 | }, | ||
8702 | { "sltb", TILE_OPC_SLTB, 0x3 /* pipes */, 3 /* num_operands */, | ||
8703 | TREG_ZERO, /* implicitly_written_register */ | ||
8704 | 1, /* can_bundle */ | ||
8705 | { | ||
8706 | /* operands */ | ||
8707 | { 7, 8, 16 }, | ||
8708 | { 9, 10, 17 }, | ||
8709 | { 0, }, | ||
8710 | { 0, }, | ||
8711 | { 0, } | ||
8712 | }, | ||
8713 | { | ||
8714 | /* fixed_bit_masks */ | ||
8715 | 0x800000007ffc0000ULL, | ||
8716 | 0xfffe000000000000ULL, | ||
8717 | 0ULL, | ||
8718 | 0ULL, | ||
8719 | 0ULL | ||
8720 | }, | ||
8721 | { | ||
8722 | /* fixed_bit_values */ | ||
8723 | 0x0000000001240000ULL, | ||
8724 | 0x0856000000000000ULL, | ||
8725 | -1ULL, | ||
8726 | -1ULL, | ||
8727 | -1ULL | ||
8728 | } | ||
8729 | }, | ||
8730 | { "sltb.sn", TILE_OPC_SLTB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8731 | TREG_SN, /* implicitly_written_register */ | ||
8732 | 1, /* can_bundle */ | ||
8733 | { | ||
8734 | /* operands */ | ||
8735 | { 7, 8, 16 }, | ||
8736 | { 9, 10, 17 }, | ||
8737 | { 0, }, | ||
8738 | { 0, }, | ||
8739 | { 0, } | ||
8740 | }, | ||
8741 | { | ||
8742 | /* fixed_bit_masks */ | ||
8743 | 0x800000007ffc0000ULL, | ||
8744 | 0xfffe000000000000ULL, | ||
8745 | 0ULL, | ||
8746 | 0ULL, | ||
8747 | 0ULL | ||
8748 | }, | ||
8749 | { | ||
8750 | /* fixed_bit_values */ | ||
8751 | 0x0000000009240000ULL, | ||
8752 | 0x0c56000000000000ULL, | ||
8753 | -1ULL, | ||
8754 | -1ULL, | ||
8755 | -1ULL | ||
8756 | } | ||
8757 | }, | ||
8758 | { "sltb_u", TILE_OPC_SLTB_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
8759 | TREG_ZERO, /* implicitly_written_register */ | ||
8760 | 1, /* can_bundle */ | ||
8761 | { | ||
8762 | /* operands */ | ||
8763 | { 7, 8, 16 }, | ||
8764 | { 9, 10, 17 }, | ||
8765 | { 0, }, | ||
8766 | { 0, }, | ||
8767 | { 0, } | ||
8768 | }, | ||
8769 | { | ||
8770 | /* fixed_bit_masks */ | ||
8771 | 0x800000007ffc0000ULL, | ||
8772 | 0xfffe000000000000ULL, | ||
8773 | 0ULL, | ||
8774 | 0ULL, | ||
8775 | 0ULL | ||
8776 | }, | ||
8777 | { | ||
8778 | /* fixed_bit_values */ | ||
8779 | 0x0000000001280000ULL, | ||
8780 | 0x0858000000000000ULL, | ||
8781 | -1ULL, | ||
8782 | -1ULL, | ||
8783 | -1ULL | ||
8784 | } | ||
8785 | }, | ||
8786 | { "sltb_u.sn", TILE_OPC_SLTB_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8787 | TREG_SN, /* implicitly_written_register */ | ||
8788 | 1, /* can_bundle */ | ||
8789 | { | ||
8790 | /* operands */ | ||
8791 | { 7, 8, 16 }, | ||
8792 | { 9, 10, 17 }, | ||
8793 | { 0, }, | ||
8794 | { 0, }, | ||
8795 | { 0, } | ||
8796 | }, | ||
8797 | { | ||
8798 | /* fixed_bit_masks */ | ||
8799 | 0x800000007ffc0000ULL, | ||
8800 | 0xfffe000000000000ULL, | ||
8801 | 0ULL, | ||
8802 | 0ULL, | ||
8803 | 0ULL | ||
8804 | }, | ||
8805 | { | ||
8806 | /* fixed_bit_values */ | ||
8807 | 0x0000000009280000ULL, | ||
8808 | 0x0c58000000000000ULL, | ||
8809 | -1ULL, | ||
8810 | -1ULL, | ||
8811 | -1ULL | ||
8812 | } | ||
8813 | }, | ||
8814 | { "slte", TILE_OPC_SLTE, 0xf /* pipes */, 3 /* num_operands */, | ||
8815 | TREG_ZERO, /* implicitly_written_register */ | ||
8816 | 1, /* can_bundle */ | ||
8817 | { | ||
8818 | /* operands */ | ||
8819 | { 7, 8, 16 }, | ||
8820 | { 9, 10, 17 }, | ||
8821 | { 11, 12, 18 }, | ||
8822 | { 13, 14, 19 }, | ||
8823 | { 0, } | ||
8824 | }, | ||
8825 | { | ||
8826 | /* fixed_bit_masks */ | ||
8827 | 0x800000007ffc0000ULL, | ||
8828 | 0xfffe000000000000ULL, | ||
8829 | 0x80000000780c0000ULL, | ||
8830 | 0xf806000000000000ULL, | ||
8831 | 0ULL | ||
8832 | }, | ||
8833 | { | ||
8834 | /* fixed_bit_values */ | ||
8835 | 0x00000000013c0000ULL, | ||
8836 | 0x0862000000000000ULL, | ||
8837 | 0x8000000028000000ULL, | ||
8838 | 0xa800000000000000ULL, | ||
8839 | -1ULL | ||
8840 | } | ||
8841 | }, | ||
8842 | { "slte.sn", TILE_OPC_SLTE_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8843 | TREG_SN, /* implicitly_written_register */ | ||
8844 | 1, /* can_bundle */ | ||
8845 | { | ||
8846 | /* operands */ | ||
8847 | { 7, 8, 16 }, | ||
8848 | { 9, 10, 17 }, | ||
8849 | { 0, }, | ||
8850 | { 0, }, | ||
8851 | { 0, } | ||
8852 | }, | ||
8853 | { | ||
8854 | /* fixed_bit_masks */ | ||
8855 | 0x800000007ffc0000ULL, | ||
8856 | 0xfffe000000000000ULL, | ||
8857 | 0ULL, | ||
8858 | 0ULL, | ||
8859 | 0ULL | ||
8860 | }, | ||
8861 | { | ||
8862 | /* fixed_bit_values */ | ||
8863 | 0x00000000093c0000ULL, | ||
8864 | 0x0c62000000000000ULL, | ||
8865 | -1ULL, | ||
8866 | -1ULL, | ||
8867 | -1ULL | ||
8868 | } | ||
8869 | }, | ||
8870 | { "slte_u", TILE_OPC_SLTE_U, 0xf /* pipes */, 3 /* num_operands */, | ||
8871 | TREG_ZERO, /* implicitly_written_register */ | ||
8872 | 1, /* can_bundle */ | ||
8873 | { | ||
8874 | /* operands */ | ||
8875 | { 7, 8, 16 }, | ||
8876 | { 9, 10, 17 }, | ||
8877 | { 11, 12, 18 }, | ||
8878 | { 13, 14, 19 }, | ||
8879 | { 0, } | ||
8880 | }, | ||
8881 | { | ||
8882 | /* fixed_bit_masks */ | ||
8883 | 0x800000007ffc0000ULL, | ||
8884 | 0xfffe000000000000ULL, | ||
8885 | 0x80000000780c0000ULL, | ||
8886 | 0xf806000000000000ULL, | ||
8887 | 0ULL | ||
8888 | }, | ||
8889 | { | ||
8890 | /* fixed_bit_values */ | ||
8891 | 0x0000000001400000ULL, | ||
8892 | 0x0864000000000000ULL, | ||
8893 | 0x8000000028040000ULL, | ||
8894 | 0xa802000000000000ULL, | ||
8895 | -1ULL | ||
8896 | } | ||
8897 | }, | ||
8898 | { "slte_u.sn", TILE_OPC_SLTE_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8899 | TREG_SN, /* implicitly_written_register */ | ||
8900 | 1, /* can_bundle */ | ||
8901 | { | ||
8902 | /* operands */ | ||
8903 | { 7, 8, 16 }, | ||
8904 | { 9, 10, 17 }, | ||
8905 | { 0, }, | ||
8906 | { 0, }, | ||
8907 | { 0, } | ||
8908 | }, | ||
8909 | { | ||
8910 | /* fixed_bit_masks */ | ||
8911 | 0x800000007ffc0000ULL, | ||
8912 | 0xfffe000000000000ULL, | ||
8913 | 0ULL, | ||
8914 | 0ULL, | ||
8915 | 0ULL | ||
8916 | }, | ||
8917 | { | ||
8918 | /* fixed_bit_values */ | ||
8919 | 0x0000000009400000ULL, | ||
8920 | 0x0c64000000000000ULL, | ||
8921 | -1ULL, | ||
8922 | -1ULL, | ||
8923 | -1ULL | ||
8924 | } | ||
8925 | }, | ||
8926 | { "slteb", TILE_OPC_SLTEB, 0x3 /* pipes */, 3 /* num_operands */, | ||
8927 | TREG_ZERO, /* implicitly_written_register */ | ||
8928 | 1, /* can_bundle */ | ||
8929 | { | ||
8930 | /* operands */ | ||
8931 | { 7, 8, 16 }, | ||
8932 | { 9, 10, 17 }, | ||
8933 | { 0, }, | ||
8934 | { 0, }, | ||
8935 | { 0, } | ||
8936 | }, | ||
8937 | { | ||
8938 | /* fixed_bit_masks */ | ||
8939 | 0x800000007ffc0000ULL, | ||
8940 | 0xfffe000000000000ULL, | ||
8941 | 0ULL, | ||
8942 | 0ULL, | ||
8943 | 0ULL | ||
8944 | }, | ||
8945 | { | ||
8946 | /* fixed_bit_values */ | ||
8947 | 0x00000000012c0000ULL, | ||
8948 | 0x085a000000000000ULL, | ||
8949 | -1ULL, | ||
8950 | -1ULL, | ||
8951 | -1ULL | ||
8952 | } | ||
8953 | }, | ||
8954 | { "slteb.sn", TILE_OPC_SLTEB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
8955 | TREG_SN, /* implicitly_written_register */ | ||
8956 | 1, /* can_bundle */ | ||
8957 | { | ||
8958 | /* operands */ | ||
8959 | { 7, 8, 16 }, | ||
8960 | { 9, 10, 17 }, | ||
8961 | { 0, }, | ||
8962 | { 0, }, | ||
8963 | { 0, } | ||
8964 | }, | ||
8965 | { | ||
8966 | /* fixed_bit_masks */ | ||
8967 | 0x800000007ffc0000ULL, | ||
8968 | 0xfffe000000000000ULL, | ||
8969 | 0ULL, | ||
8970 | 0ULL, | ||
8971 | 0ULL | ||
8972 | }, | ||
8973 | { | ||
8974 | /* fixed_bit_values */ | ||
8975 | 0x00000000092c0000ULL, | ||
8976 | 0x0c5a000000000000ULL, | ||
8977 | -1ULL, | ||
8978 | -1ULL, | ||
8979 | -1ULL | ||
8980 | } | ||
8981 | }, | ||
8982 | { "slteb_u", TILE_OPC_SLTEB_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
8983 | TREG_ZERO, /* implicitly_written_register */ | ||
8984 | 1, /* can_bundle */ | ||
8985 | { | ||
8986 | /* operands */ | ||
8987 | { 7, 8, 16 }, | ||
8988 | { 9, 10, 17 }, | ||
8989 | { 0, }, | ||
8990 | { 0, }, | ||
8991 | { 0, } | ||
8992 | }, | ||
8993 | { | ||
8994 | /* fixed_bit_masks */ | ||
8995 | 0x800000007ffc0000ULL, | ||
8996 | 0xfffe000000000000ULL, | ||
8997 | 0ULL, | ||
8998 | 0ULL, | ||
8999 | 0ULL | ||
9000 | }, | ||
9001 | { | ||
9002 | /* fixed_bit_values */ | ||
9003 | 0x0000000001300000ULL, | ||
9004 | 0x085c000000000000ULL, | ||
9005 | -1ULL, | ||
9006 | -1ULL, | ||
9007 | -1ULL | ||
9008 | } | ||
9009 | }, | ||
9010 | { "slteb_u.sn", TILE_OPC_SLTEB_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9011 | TREG_SN, /* implicitly_written_register */ | ||
9012 | 1, /* can_bundle */ | ||
9013 | { | ||
9014 | /* operands */ | ||
9015 | { 7, 8, 16 }, | ||
9016 | { 9, 10, 17 }, | ||
9017 | { 0, }, | ||
9018 | { 0, }, | ||
9019 | { 0, } | ||
9020 | }, | ||
9021 | { | ||
9022 | /* fixed_bit_masks */ | ||
9023 | 0x800000007ffc0000ULL, | ||
9024 | 0xfffe000000000000ULL, | ||
9025 | 0ULL, | ||
9026 | 0ULL, | ||
9027 | 0ULL | ||
9028 | }, | ||
9029 | { | ||
9030 | /* fixed_bit_values */ | ||
9031 | 0x0000000009300000ULL, | ||
9032 | 0x0c5c000000000000ULL, | ||
9033 | -1ULL, | ||
9034 | -1ULL, | ||
9035 | -1ULL | ||
9036 | } | ||
9037 | }, | ||
9038 | { "slteh", TILE_OPC_SLTEH, 0x3 /* pipes */, 3 /* num_operands */, | ||
9039 | TREG_ZERO, /* implicitly_written_register */ | ||
9040 | 1, /* can_bundle */ | ||
9041 | { | ||
9042 | /* operands */ | ||
9043 | { 7, 8, 16 }, | ||
9044 | { 9, 10, 17 }, | ||
9045 | { 0, }, | ||
9046 | { 0, }, | ||
9047 | { 0, } | ||
9048 | }, | ||
9049 | { | ||
9050 | /* fixed_bit_masks */ | ||
9051 | 0x800000007ffc0000ULL, | ||
9052 | 0xfffe000000000000ULL, | ||
9053 | 0ULL, | ||
9054 | 0ULL, | ||
9055 | 0ULL | ||
9056 | }, | ||
9057 | { | ||
9058 | /* fixed_bit_values */ | ||
9059 | 0x0000000001340000ULL, | ||
9060 | 0x085e000000000000ULL, | ||
9061 | -1ULL, | ||
9062 | -1ULL, | ||
9063 | -1ULL | ||
9064 | } | ||
9065 | }, | ||
9066 | { "slteh.sn", TILE_OPC_SLTEH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9067 | TREG_SN, /* implicitly_written_register */ | ||
9068 | 1, /* can_bundle */ | ||
9069 | { | ||
9070 | /* operands */ | ||
9071 | { 7, 8, 16 }, | ||
9072 | { 9, 10, 17 }, | ||
9073 | { 0, }, | ||
9074 | { 0, }, | ||
9075 | { 0, } | ||
9076 | }, | ||
9077 | { | ||
9078 | /* fixed_bit_masks */ | ||
9079 | 0x800000007ffc0000ULL, | ||
9080 | 0xfffe000000000000ULL, | ||
9081 | 0ULL, | ||
9082 | 0ULL, | ||
9083 | 0ULL | ||
9084 | }, | ||
9085 | { | ||
9086 | /* fixed_bit_values */ | ||
9087 | 0x0000000009340000ULL, | ||
9088 | 0x0c5e000000000000ULL, | ||
9089 | -1ULL, | ||
9090 | -1ULL, | ||
9091 | -1ULL | ||
9092 | } | ||
9093 | }, | ||
9094 | { "slteh_u", TILE_OPC_SLTEH_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
9095 | TREG_ZERO, /* implicitly_written_register */ | ||
9096 | 1, /* can_bundle */ | ||
9097 | { | ||
9098 | /* operands */ | ||
9099 | { 7, 8, 16 }, | ||
9100 | { 9, 10, 17 }, | ||
9101 | { 0, }, | ||
9102 | { 0, }, | ||
9103 | { 0, } | ||
9104 | }, | ||
9105 | { | ||
9106 | /* fixed_bit_masks */ | ||
9107 | 0x800000007ffc0000ULL, | ||
9108 | 0xfffe000000000000ULL, | ||
9109 | 0ULL, | ||
9110 | 0ULL, | ||
9111 | 0ULL | ||
9112 | }, | ||
9113 | { | ||
9114 | /* fixed_bit_values */ | ||
9115 | 0x0000000001380000ULL, | ||
9116 | 0x0860000000000000ULL, | ||
9117 | -1ULL, | ||
9118 | -1ULL, | ||
9119 | -1ULL | ||
9120 | } | ||
9121 | }, | ||
9122 | { "slteh_u.sn", TILE_OPC_SLTEH_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9123 | TREG_SN, /* implicitly_written_register */ | ||
9124 | 1, /* can_bundle */ | ||
9125 | { | ||
9126 | /* operands */ | ||
9127 | { 7, 8, 16 }, | ||
9128 | { 9, 10, 17 }, | ||
9129 | { 0, }, | ||
9130 | { 0, }, | ||
9131 | { 0, } | ||
9132 | }, | ||
9133 | { | ||
9134 | /* fixed_bit_masks */ | ||
9135 | 0x800000007ffc0000ULL, | ||
9136 | 0xfffe000000000000ULL, | ||
9137 | 0ULL, | ||
9138 | 0ULL, | ||
9139 | 0ULL | ||
9140 | }, | ||
9141 | { | ||
9142 | /* fixed_bit_values */ | ||
9143 | 0x0000000009380000ULL, | ||
9144 | 0x0c60000000000000ULL, | ||
9145 | -1ULL, | ||
9146 | -1ULL, | ||
9147 | -1ULL | ||
9148 | } | ||
9149 | }, | ||
9150 | { "slth", TILE_OPC_SLTH, 0x3 /* pipes */, 3 /* num_operands */, | ||
9151 | TREG_ZERO, /* implicitly_written_register */ | ||
9152 | 1, /* can_bundle */ | ||
9153 | { | ||
9154 | /* operands */ | ||
9155 | { 7, 8, 16 }, | ||
9156 | { 9, 10, 17 }, | ||
9157 | { 0, }, | ||
9158 | { 0, }, | ||
9159 | { 0, } | ||
9160 | }, | ||
9161 | { | ||
9162 | /* fixed_bit_masks */ | ||
9163 | 0x800000007ffc0000ULL, | ||
9164 | 0xfffe000000000000ULL, | ||
9165 | 0ULL, | ||
9166 | 0ULL, | ||
9167 | 0ULL | ||
9168 | }, | ||
9169 | { | ||
9170 | /* fixed_bit_values */ | ||
9171 | 0x0000000001440000ULL, | ||
9172 | 0x0866000000000000ULL, | ||
9173 | -1ULL, | ||
9174 | -1ULL, | ||
9175 | -1ULL | ||
9176 | } | ||
9177 | }, | ||
9178 | { "slth.sn", TILE_OPC_SLTH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9179 | TREG_SN, /* implicitly_written_register */ | ||
9180 | 1, /* can_bundle */ | ||
9181 | { | ||
9182 | /* operands */ | ||
9183 | { 7, 8, 16 }, | ||
9184 | { 9, 10, 17 }, | ||
9185 | { 0, }, | ||
9186 | { 0, }, | ||
9187 | { 0, } | ||
9188 | }, | ||
9189 | { | ||
9190 | /* fixed_bit_masks */ | ||
9191 | 0x800000007ffc0000ULL, | ||
9192 | 0xfffe000000000000ULL, | ||
9193 | 0ULL, | ||
9194 | 0ULL, | ||
9195 | 0ULL | ||
9196 | }, | ||
9197 | { | ||
9198 | /* fixed_bit_values */ | ||
9199 | 0x0000000009440000ULL, | ||
9200 | 0x0c66000000000000ULL, | ||
9201 | -1ULL, | ||
9202 | -1ULL, | ||
9203 | -1ULL | ||
9204 | } | ||
9205 | }, | ||
9206 | { "slth_u", TILE_OPC_SLTH_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
9207 | TREG_ZERO, /* implicitly_written_register */ | ||
9208 | 1, /* can_bundle */ | ||
9209 | { | ||
9210 | /* operands */ | ||
9211 | { 7, 8, 16 }, | ||
9212 | { 9, 10, 17 }, | ||
9213 | { 0, }, | ||
9214 | { 0, }, | ||
9215 | { 0, } | ||
9216 | }, | ||
9217 | { | ||
9218 | /* fixed_bit_masks */ | ||
9219 | 0x800000007ffc0000ULL, | ||
9220 | 0xfffe000000000000ULL, | ||
9221 | 0ULL, | ||
9222 | 0ULL, | ||
9223 | 0ULL | ||
9224 | }, | ||
9225 | { | ||
9226 | /* fixed_bit_values */ | ||
9227 | 0x0000000001480000ULL, | ||
9228 | 0x0868000000000000ULL, | ||
9229 | -1ULL, | ||
9230 | -1ULL, | ||
9231 | -1ULL | ||
9232 | } | ||
9233 | }, | ||
9234 | { "slth_u.sn", TILE_OPC_SLTH_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9235 | TREG_SN, /* implicitly_written_register */ | ||
9236 | 1, /* can_bundle */ | ||
9237 | { | ||
9238 | /* operands */ | ||
9239 | { 7, 8, 16 }, | ||
9240 | { 9, 10, 17 }, | ||
9241 | { 0, }, | ||
9242 | { 0, }, | ||
9243 | { 0, } | ||
9244 | }, | ||
9245 | { | ||
9246 | /* fixed_bit_masks */ | ||
9247 | 0x800000007ffc0000ULL, | ||
9248 | 0xfffe000000000000ULL, | ||
9249 | 0ULL, | ||
9250 | 0ULL, | ||
9251 | 0ULL | ||
9252 | }, | ||
9253 | { | ||
9254 | /* fixed_bit_values */ | ||
9255 | 0x0000000009480000ULL, | ||
9256 | 0x0c68000000000000ULL, | ||
9257 | -1ULL, | ||
9258 | -1ULL, | ||
9259 | -1ULL | ||
9260 | } | ||
9261 | }, | ||
9262 | { "slti", TILE_OPC_SLTI, 0xf /* pipes */, 3 /* num_operands */, | ||
9263 | TREG_ZERO, /* implicitly_written_register */ | ||
9264 | 1, /* can_bundle */ | ||
9265 | { | ||
9266 | /* operands */ | ||
9267 | { 7, 8, 0 }, | ||
9268 | { 9, 10, 1 }, | ||
9269 | { 11, 12, 2 }, | ||
9270 | { 13, 14, 3 }, | ||
9271 | { 0, } | ||
9272 | }, | ||
9273 | { | ||
9274 | /* fixed_bit_masks */ | ||
9275 | 0x800000007ff00000ULL, | ||
9276 | 0xfff8000000000000ULL, | ||
9277 | 0x8000000078000000ULL, | ||
9278 | 0xf800000000000000ULL, | ||
9279 | 0ULL | ||
9280 | }, | ||
9281 | { | ||
9282 | /* fixed_bit_values */ | ||
9283 | 0x0000000041000000ULL, | ||
9284 | 0x3098000000000000ULL, | ||
9285 | 0x8000000070000000ULL, | ||
9286 | 0xe000000000000000ULL, | ||
9287 | -1ULL | ||
9288 | } | ||
9289 | }, | ||
9290 | { "slti.sn", TILE_OPC_SLTI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9291 | TREG_SN, /* implicitly_written_register */ | ||
9292 | 1, /* can_bundle */ | ||
9293 | { | ||
9294 | /* operands */ | ||
9295 | { 7, 8, 0 }, | ||
9296 | { 9, 10, 1 }, | ||
9297 | { 0, }, | ||
9298 | { 0, }, | ||
9299 | { 0, } | ||
9300 | }, | ||
9301 | { | ||
9302 | /* fixed_bit_masks */ | ||
9303 | 0x800000007ff00000ULL, | ||
9304 | 0xfff8000000000000ULL, | ||
9305 | 0ULL, | ||
9306 | 0ULL, | ||
9307 | 0ULL | ||
9308 | }, | ||
9309 | { | ||
9310 | /* fixed_bit_values */ | ||
9311 | 0x0000000049000000ULL, | ||
9312 | 0x3498000000000000ULL, | ||
9313 | -1ULL, | ||
9314 | -1ULL, | ||
9315 | -1ULL | ||
9316 | } | ||
9317 | }, | ||
9318 | { "slti_u", TILE_OPC_SLTI_U, 0xf /* pipes */, 3 /* num_operands */, | ||
9319 | TREG_ZERO, /* implicitly_written_register */ | ||
9320 | 1, /* can_bundle */ | ||
9321 | { | ||
9322 | /* operands */ | ||
9323 | { 7, 8, 0 }, | ||
9324 | { 9, 10, 1 }, | ||
9325 | { 11, 12, 2 }, | ||
9326 | { 13, 14, 3 }, | ||
9327 | { 0, } | ||
9328 | }, | ||
9329 | { | ||
9330 | /* fixed_bit_masks */ | ||
9331 | 0x800000007ff00000ULL, | ||
9332 | 0xfff8000000000000ULL, | ||
9333 | 0x8000000078000000ULL, | ||
9334 | 0xf800000000000000ULL, | ||
9335 | 0ULL | ||
9336 | }, | ||
9337 | { | ||
9338 | /* fixed_bit_values */ | ||
9339 | 0x0000000041100000ULL, | ||
9340 | 0x30a0000000000000ULL, | ||
9341 | 0x8000000078000000ULL, | ||
9342 | 0xe800000000000000ULL, | ||
9343 | -1ULL | ||
9344 | } | ||
9345 | }, | ||
9346 | { "slti_u.sn", TILE_OPC_SLTI_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9347 | TREG_SN, /* implicitly_written_register */ | ||
9348 | 1, /* can_bundle */ | ||
9349 | { | ||
9350 | /* operands */ | ||
9351 | { 7, 8, 0 }, | ||
9352 | { 9, 10, 1 }, | ||
9353 | { 0, }, | ||
9354 | { 0, }, | ||
9355 | { 0, } | ||
9356 | }, | ||
9357 | { | ||
9358 | /* fixed_bit_masks */ | ||
9359 | 0x800000007ff00000ULL, | ||
9360 | 0xfff8000000000000ULL, | ||
9361 | 0ULL, | ||
9362 | 0ULL, | ||
9363 | 0ULL | ||
9364 | }, | ||
9365 | { | ||
9366 | /* fixed_bit_values */ | ||
9367 | 0x0000000049100000ULL, | ||
9368 | 0x34a0000000000000ULL, | ||
9369 | -1ULL, | ||
9370 | -1ULL, | ||
9371 | -1ULL | ||
9372 | } | ||
9373 | }, | ||
9374 | { "sltib", TILE_OPC_SLTIB, 0x3 /* pipes */, 3 /* num_operands */, | ||
9375 | TREG_ZERO, /* implicitly_written_register */ | ||
9376 | 1, /* can_bundle */ | ||
9377 | { | ||
9378 | /* operands */ | ||
9379 | { 7, 8, 0 }, | ||
9380 | { 9, 10, 1 }, | ||
9381 | { 0, }, | ||
9382 | { 0, }, | ||
9383 | { 0, } | ||
9384 | }, | ||
9385 | { | ||
9386 | /* fixed_bit_masks */ | ||
9387 | 0x800000007ff00000ULL, | ||
9388 | 0xfff8000000000000ULL, | ||
9389 | 0ULL, | ||
9390 | 0ULL, | ||
9391 | 0ULL | ||
9392 | }, | ||
9393 | { | ||
9394 | /* fixed_bit_values */ | ||
9395 | 0x0000000040c00000ULL, | ||
9396 | 0x3078000000000000ULL, | ||
9397 | -1ULL, | ||
9398 | -1ULL, | ||
9399 | -1ULL | ||
9400 | } | ||
9401 | }, | ||
9402 | { "sltib.sn", TILE_OPC_SLTIB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9403 | TREG_SN, /* implicitly_written_register */ | ||
9404 | 1, /* can_bundle */ | ||
9405 | { | ||
9406 | /* operands */ | ||
9407 | { 7, 8, 0 }, | ||
9408 | { 9, 10, 1 }, | ||
9409 | { 0, }, | ||
9410 | { 0, }, | ||
9411 | { 0, } | ||
9412 | }, | ||
9413 | { | ||
9414 | /* fixed_bit_masks */ | ||
9415 | 0x800000007ff00000ULL, | ||
9416 | 0xfff8000000000000ULL, | ||
9417 | 0ULL, | ||
9418 | 0ULL, | ||
9419 | 0ULL | ||
9420 | }, | ||
9421 | { | ||
9422 | /* fixed_bit_values */ | ||
9423 | 0x0000000048c00000ULL, | ||
9424 | 0x3478000000000000ULL, | ||
9425 | -1ULL, | ||
9426 | -1ULL, | ||
9427 | -1ULL | ||
9428 | } | ||
9429 | }, | ||
9430 | { "sltib_u", TILE_OPC_SLTIB_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
9431 | TREG_ZERO, /* implicitly_written_register */ | ||
9432 | 1, /* can_bundle */ | ||
9433 | { | ||
9434 | /* operands */ | ||
9435 | { 7, 8, 0 }, | ||
9436 | { 9, 10, 1 }, | ||
9437 | { 0, }, | ||
9438 | { 0, }, | ||
9439 | { 0, } | ||
9440 | }, | ||
9441 | { | ||
9442 | /* fixed_bit_masks */ | ||
9443 | 0x800000007ff00000ULL, | ||
9444 | 0xfff8000000000000ULL, | ||
9445 | 0ULL, | ||
9446 | 0ULL, | ||
9447 | 0ULL | ||
9448 | }, | ||
9449 | { | ||
9450 | /* fixed_bit_values */ | ||
9451 | 0x0000000040d00000ULL, | ||
9452 | 0x3080000000000000ULL, | ||
9453 | -1ULL, | ||
9454 | -1ULL, | ||
9455 | -1ULL | ||
9456 | } | ||
9457 | }, | ||
9458 | { "sltib_u.sn", TILE_OPC_SLTIB_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9459 | TREG_SN, /* implicitly_written_register */ | ||
9460 | 1, /* can_bundle */ | ||
9461 | { | ||
9462 | /* operands */ | ||
9463 | { 7, 8, 0 }, | ||
9464 | { 9, 10, 1 }, | ||
9465 | { 0, }, | ||
9466 | { 0, }, | ||
9467 | { 0, } | ||
9468 | }, | ||
9469 | { | ||
9470 | /* fixed_bit_masks */ | ||
9471 | 0x800000007ff00000ULL, | ||
9472 | 0xfff8000000000000ULL, | ||
9473 | 0ULL, | ||
9474 | 0ULL, | ||
9475 | 0ULL | ||
9476 | }, | ||
9477 | { | ||
9478 | /* fixed_bit_values */ | ||
9479 | 0x0000000048d00000ULL, | ||
9480 | 0x3480000000000000ULL, | ||
9481 | -1ULL, | ||
9482 | -1ULL, | ||
9483 | -1ULL | ||
9484 | } | ||
9485 | }, | ||
9486 | { "sltih", TILE_OPC_SLTIH, 0x3 /* pipes */, 3 /* num_operands */, | ||
9487 | TREG_ZERO, /* implicitly_written_register */ | ||
9488 | 1, /* can_bundle */ | ||
9489 | { | ||
9490 | /* operands */ | ||
9491 | { 7, 8, 0 }, | ||
9492 | { 9, 10, 1 }, | ||
9493 | { 0, }, | ||
9494 | { 0, }, | ||
9495 | { 0, } | ||
9496 | }, | ||
9497 | { | ||
9498 | /* fixed_bit_masks */ | ||
9499 | 0x800000007ff00000ULL, | ||
9500 | 0xfff8000000000000ULL, | ||
9501 | 0ULL, | ||
9502 | 0ULL, | ||
9503 | 0ULL | ||
9504 | }, | ||
9505 | { | ||
9506 | /* fixed_bit_values */ | ||
9507 | 0x0000000040e00000ULL, | ||
9508 | 0x3088000000000000ULL, | ||
9509 | -1ULL, | ||
9510 | -1ULL, | ||
9511 | -1ULL | ||
9512 | } | ||
9513 | }, | ||
9514 | { "sltih.sn", TILE_OPC_SLTIH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9515 | TREG_SN, /* implicitly_written_register */ | ||
9516 | 1, /* can_bundle */ | ||
9517 | { | ||
9518 | /* operands */ | ||
9519 | { 7, 8, 0 }, | ||
9520 | { 9, 10, 1 }, | ||
9521 | { 0, }, | ||
9522 | { 0, }, | ||
9523 | { 0, } | ||
9524 | }, | ||
9525 | { | ||
9526 | /* fixed_bit_masks */ | ||
9527 | 0x800000007ff00000ULL, | ||
9528 | 0xfff8000000000000ULL, | ||
9529 | 0ULL, | ||
9530 | 0ULL, | ||
9531 | 0ULL | ||
9532 | }, | ||
9533 | { | ||
9534 | /* fixed_bit_values */ | ||
9535 | 0x0000000048e00000ULL, | ||
9536 | 0x3488000000000000ULL, | ||
9537 | -1ULL, | ||
9538 | -1ULL, | ||
9539 | -1ULL | ||
9540 | } | ||
9541 | }, | ||
9542 | { "sltih_u", TILE_OPC_SLTIH_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
9543 | TREG_ZERO, /* implicitly_written_register */ | ||
9544 | 1, /* can_bundle */ | ||
9545 | { | ||
9546 | /* operands */ | ||
9547 | { 7, 8, 0 }, | ||
9548 | { 9, 10, 1 }, | ||
9549 | { 0, }, | ||
9550 | { 0, }, | ||
9551 | { 0, } | ||
9552 | }, | ||
9553 | { | ||
9554 | /* fixed_bit_masks */ | ||
9555 | 0x800000007ff00000ULL, | ||
9556 | 0xfff8000000000000ULL, | ||
9557 | 0ULL, | ||
9558 | 0ULL, | ||
9559 | 0ULL | ||
9560 | }, | ||
9561 | { | ||
9562 | /* fixed_bit_values */ | ||
9563 | 0x0000000040f00000ULL, | ||
9564 | 0x3090000000000000ULL, | ||
9565 | -1ULL, | ||
9566 | -1ULL, | ||
9567 | -1ULL | ||
9568 | } | ||
9569 | }, | ||
9570 | { "sltih_u.sn", TILE_OPC_SLTIH_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9571 | TREG_SN, /* implicitly_written_register */ | ||
9572 | 1, /* can_bundle */ | ||
9573 | { | ||
9574 | /* operands */ | ||
9575 | { 7, 8, 0 }, | ||
9576 | { 9, 10, 1 }, | ||
9577 | { 0, }, | ||
9578 | { 0, }, | ||
9579 | { 0, } | ||
9580 | }, | ||
9581 | { | ||
9582 | /* fixed_bit_masks */ | ||
9583 | 0x800000007ff00000ULL, | ||
9584 | 0xfff8000000000000ULL, | ||
9585 | 0ULL, | ||
9586 | 0ULL, | ||
9587 | 0ULL | ||
9588 | }, | ||
9589 | { | ||
9590 | /* fixed_bit_values */ | ||
9591 | 0x0000000048f00000ULL, | ||
9592 | 0x3490000000000000ULL, | ||
9593 | -1ULL, | ||
9594 | -1ULL, | ||
9595 | -1ULL | ||
9596 | } | ||
9597 | }, | ||
9598 | { "sne", TILE_OPC_SNE, 0xf /* pipes */, 3 /* num_operands */, | ||
9599 | TREG_ZERO, /* implicitly_written_register */ | ||
9600 | 1, /* can_bundle */ | ||
9601 | { | ||
9602 | /* operands */ | ||
9603 | { 7, 8, 16 }, | ||
9604 | { 9, 10, 17 }, | ||
9605 | { 11, 12, 18 }, | ||
9606 | { 13, 14, 19 }, | ||
9607 | { 0, } | ||
9608 | }, | ||
9609 | { | ||
9610 | /* fixed_bit_masks */ | ||
9611 | 0x800000007ffc0000ULL, | ||
9612 | 0xfffe000000000000ULL, | ||
9613 | 0x80000000780c0000ULL, | ||
9614 | 0xf806000000000000ULL, | ||
9615 | 0ULL | ||
9616 | }, | ||
9617 | { | ||
9618 | /* fixed_bit_values */ | ||
9619 | 0x00000000015c0000ULL, | ||
9620 | 0x0872000000000000ULL, | ||
9621 | 0x80000000300c0000ULL, | ||
9622 | 0xb006000000000000ULL, | ||
9623 | -1ULL | ||
9624 | } | ||
9625 | }, | ||
9626 | { "sne.sn", TILE_OPC_SNE_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9627 | TREG_SN, /* implicitly_written_register */ | ||
9628 | 1, /* can_bundle */ | ||
9629 | { | ||
9630 | /* operands */ | ||
9631 | { 7, 8, 16 }, | ||
9632 | { 9, 10, 17 }, | ||
9633 | { 0, }, | ||
9634 | { 0, }, | ||
9635 | { 0, } | ||
9636 | }, | ||
9637 | { | ||
9638 | /* fixed_bit_masks */ | ||
9639 | 0x800000007ffc0000ULL, | ||
9640 | 0xfffe000000000000ULL, | ||
9641 | 0ULL, | ||
9642 | 0ULL, | ||
9643 | 0ULL | ||
9644 | }, | ||
9645 | { | ||
9646 | /* fixed_bit_values */ | ||
9647 | 0x00000000095c0000ULL, | ||
9648 | 0x0c72000000000000ULL, | ||
9649 | -1ULL, | ||
9650 | -1ULL, | ||
9651 | -1ULL | ||
9652 | } | ||
9653 | }, | ||
9654 | { "sneb", TILE_OPC_SNEB, 0x3 /* pipes */, 3 /* num_operands */, | ||
9655 | TREG_ZERO, /* implicitly_written_register */ | ||
9656 | 1, /* can_bundle */ | ||
9657 | { | ||
9658 | /* operands */ | ||
9659 | { 7, 8, 16 }, | ||
9660 | { 9, 10, 17 }, | ||
9661 | { 0, }, | ||
9662 | { 0, }, | ||
9663 | { 0, } | ||
9664 | }, | ||
9665 | { | ||
9666 | /* fixed_bit_masks */ | ||
9667 | 0x800000007ffc0000ULL, | ||
9668 | 0xfffe000000000000ULL, | ||
9669 | 0ULL, | ||
9670 | 0ULL, | ||
9671 | 0ULL | ||
9672 | }, | ||
9673 | { | ||
9674 | /* fixed_bit_values */ | ||
9675 | 0x0000000001540000ULL, | ||
9676 | 0x086e000000000000ULL, | ||
9677 | -1ULL, | ||
9678 | -1ULL, | ||
9679 | -1ULL | ||
9680 | } | ||
9681 | }, | ||
9682 | { "sneb.sn", TILE_OPC_SNEB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9683 | TREG_SN, /* implicitly_written_register */ | ||
9684 | 1, /* can_bundle */ | ||
9685 | { | ||
9686 | /* operands */ | ||
9687 | { 7, 8, 16 }, | ||
9688 | { 9, 10, 17 }, | ||
9689 | { 0, }, | ||
9690 | { 0, }, | ||
9691 | { 0, } | ||
9692 | }, | ||
9693 | { | ||
9694 | /* fixed_bit_masks */ | ||
9695 | 0x800000007ffc0000ULL, | ||
9696 | 0xfffe000000000000ULL, | ||
9697 | 0ULL, | ||
9698 | 0ULL, | ||
9699 | 0ULL | ||
9700 | }, | ||
9701 | { | ||
9702 | /* fixed_bit_values */ | ||
9703 | 0x0000000009540000ULL, | ||
9704 | 0x0c6e000000000000ULL, | ||
9705 | -1ULL, | ||
9706 | -1ULL, | ||
9707 | -1ULL | ||
9708 | } | ||
9709 | }, | ||
9710 | { "sneh", TILE_OPC_SNEH, 0x3 /* pipes */, 3 /* num_operands */, | ||
9711 | TREG_ZERO, /* implicitly_written_register */ | ||
9712 | 1, /* can_bundle */ | ||
9713 | { | ||
9714 | /* operands */ | ||
9715 | { 7, 8, 16 }, | ||
9716 | { 9, 10, 17 }, | ||
9717 | { 0, }, | ||
9718 | { 0, }, | ||
9719 | { 0, } | ||
9720 | }, | ||
9721 | { | ||
9722 | /* fixed_bit_masks */ | ||
9723 | 0x800000007ffc0000ULL, | ||
9724 | 0xfffe000000000000ULL, | ||
9725 | 0ULL, | ||
9726 | 0ULL, | ||
9727 | 0ULL | ||
9728 | }, | ||
9729 | { | ||
9730 | /* fixed_bit_values */ | ||
9731 | 0x0000000001580000ULL, | ||
9732 | 0x0870000000000000ULL, | ||
9733 | -1ULL, | ||
9734 | -1ULL, | ||
9735 | -1ULL | ||
9736 | } | ||
9737 | }, | ||
9738 | { "sneh.sn", TILE_OPC_SNEH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9739 | TREG_SN, /* implicitly_written_register */ | ||
9740 | 1, /* can_bundle */ | ||
9741 | { | ||
9742 | /* operands */ | ||
9743 | { 7, 8, 16 }, | ||
9744 | { 9, 10, 17 }, | ||
9745 | { 0, }, | ||
9746 | { 0, }, | ||
9747 | { 0, } | ||
9748 | }, | ||
9749 | { | ||
9750 | /* fixed_bit_masks */ | ||
9751 | 0x800000007ffc0000ULL, | ||
9752 | 0xfffe000000000000ULL, | ||
9753 | 0ULL, | ||
9754 | 0ULL, | ||
9755 | 0ULL | ||
9756 | }, | ||
9757 | { | ||
9758 | /* fixed_bit_values */ | ||
9759 | 0x0000000009580000ULL, | ||
9760 | 0x0c70000000000000ULL, | ||
9761 | -1ULL, | ||
9762 | -1ULL, | ||
9763 | -1ULL | ||
9764 | } | ||
9765 | }, | ||
9766 | { "sra", TILE_OPC_SRA, 0xf /* pipes */, 3 /* num_operands */, | ||
9767 | TREG_ZERO, /* implicitly_written_register */ | ||
9768 | 1, /* can_bundle */ | ||
9769 | { | ||
9770 | /* operands */ | ||
9771 | { 7, 8, 16 }, | ||
9772 | { 9, 10, 17 }, | ||
9773 | { 11, 12, 18 }, | ||
9774 | { 13, 14, 19 }, | ||
9775 | { 0, } | ||
9776 | }, | ||
9777 | { | ||
9778 | /* fixed_bit_masks */ | ||
9779 | 0x800000007ffc0000ULL, | ||
9780 | 0xfffe000000000000ULL, | ||
9781 | 0x80000000780c0000ULL, | ||
9782 | 0xf806000000000000ULL, | ||
9783 | 0ULL | ||
9784 | }, | ||
9785 | { | ||
9786 | /* fixed_bit_values */ | ||
9787 | 0x0000000001680000ULL, | ||
9788 | 0x0878000000000000ULL, | ||
9789 | 0x80000000200c0000ULL, | ||
9790 | 0xa006000000000000ULL, | ||
9791 | -1ULL | ||
9792 | } | ||
9793 | }, | ||
9794 | { "sra.sn", TILE_OPC_SRA_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9795 | TREG_SN, /* implicitly_written_register */ | ||
9796 | 1, /* can_bundle */ | ||
9797 | { | ||
9798 | /* operands */ | ||
9799 | { 7, 8, 16 }, | ||
9800 | { 9, 10, 17 }, | ||
9801 | { 0, }, | ||
9802 | { 0, }, | ||
9803 | { 0, } | ||
9804 | }, | ||
9805 | { | ||
9806 | /* fixed_bit_masks */ | ||
9807 | 0x800000007ffc0000ULL, | ||
9808 | 0xfffe000000000000ULL, | ||
9809 | 0ULL, | ||
9810 | 0ULL, | ||
9811 | 0ULL | ||
9812 | }, | ||
9813 | { | ||
9814 | /* fixed_bit_values */ | ||
9815 | 0x0000000009680000ULL, | ||
9816 | 0x0c78000000000000ULL, | ||
9817 | -1ULL, | ||
9818 | -1ULL, | ||
9819 | -1ULL | ||
9820 | } | ||
9821 | }, | ||
9822 | { "srab", TILE_OPC_SRAB, 0x3 /* pipes */, 3 /* num_operands */, | ||
9823 | TREG_ZERO, /* implicitly_written_register */ | ||
9824 | 1, /* can_bundle */ | ||
9825 | { | ||
9826 | /* operands */ | ||
9827 | { 7, 8, 16 }, | ||
9828 | { 9, 10, 17 }, | ||
9829 | { 0, }, | ||
9830 | { 0, }, | ||
9831 | { 0, } | ||
9832 | }, | ||
9833 | { | ||
9834 | /* fixed_bit_masks */ | ||
9835 | 0x800000007ffc0000ULL, | ||
9836 | 0xfffe000000000000ULL, | ||
9837 | 0ULL, | ||
9838 | 0ULL, | ||
9839 | 0ULL | ||
9840 | }, | ||
9841 | { | ||
9842 | /* fixed_bit_values */ | ||
9843 | 0x0000000001600000ULL, | ||
9844 | 0x0874000000000000ULL, | ||
9845 | -1ULL, | ||
9846 | -1ULL, | ||
9847 | -1ULL | ||
9848 | } | ||
9849 | }, | ||
9850 | { "srab.sn", TILE_OPC_SRAB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9851 | TREG_SN, /* implicitly_written_register */ | ||
9852 | 1, /* can_bundle */ | ||
9853 | { | ||
9854 | /* operands */ | ||
9855 | { 7, 8, 16 }, | ||
9856 | { 9, 10, 17 }, | ||
9857 | { 0, }, | ||
9858 | { 0, }, | ||
9859 | { 0, } | ||
9860 | }, | ||
9861 | { | ||
9862 | /* fixed_bit_masks */ | ||
9863 | 0x800000007ffc0000ULL, | ||
9864 | 0xfffe000000000000ULL, | ||
9865 | 0ULL, | ||
9866 | 0ULL, | ||
9867 | 0ULL | ||
9868 | }, | ||
9869 | { | ||
9870 | /* fixed_bit_values */ | ||
9871 | 0x0000000009600000ULL, | ||
9872 | 0x0c74000000000000ULL, | ||
9873 | -1ULL, | ||
9874 | -1ULL, | ||
9875 | -1ULL | ||
9876 | } | ||
9877 | }, | ||
9878 | { "srah", TILE_OPC_SRAH, 0x3 /* pipes */, 3 /* num_operands */, | ||
9879 | TREG_ZERO, /* implicitly_written_register */ | ||
9880 | 1, /* can_bundle */ | ||
9881 | { | ||
9882 | /* operands */ | ||
9883 | { 7, 8, 16 }, | ||
9884 | { 9, 10, 17 }, | ||
9885 | { 0, }, | ||
9886 | { 0, }, | ||
9887 | { 0, } | ||
9888 | }, | ||
9889 | { | ||
9890 | /* fixed_bit_masks */ | ||
9891 | 0x800000007ffc0000ULL, | ||
9892 | 0xfffe000000000000ULL, | ||
9893 | 0ULL, | ||
9894 | 0ULL, | ||
9895 | 0ULL | ||
9896 | }, | ||
9897 | { | ||
9898 | /* fixed_bit_values */ | ||
9899 | 0x0000000001640000ULL, | ||
9900 | 0x0876000000000000ULL, | ||
9901 | -1ULL, | ||
9902 | -1ULL, | ||
9903 | -1ULL | ||
9904 | } | ||
9905 | }, | ||
9906 | { "srah.sn", TILE_OPC_SRAH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9907 | TREG_SN, /* implicitly_written_register */ | ||
9908 | 1, /* can_bundle */ | ||
9909 | { | ||
9910 | /* operands */ | ||
9911 | { 7, 8, 16 }, | ||
9912 | { 9, 10, 17 }, | ||
9913 | { 0, }, | ||
9914 | { 0, }, | ||
9915 | { 0, } | ||
9916 | }, | ||
9917 | { | ||
9918 | /* fixed_bit_masks */ | ||
9919 | 0x800000007ffc0000ULL, | ||
9920 | 0xfffe000000000000ULL, | ||
9921 | 0ULL, | ||
9922 | 0ULL, | ||
9923 | 0ULL | ||
9924 | }, | ||
9925 | { | ||
9926 | /* fixed_bit_values */ | ||
9927 | 0x0000000009640000ULL, | ||
9928 | 0x0c76000000000000ULL, | ||
9929 | -1ULL, | ||
9930 | -1ULL, | ||
9931 | -1ULL | ||
9932 | } | ||
9933 | }, | ||
9934 | { "srai", TILE_OPC_SRAI, 0xf /* pipes */, 3 /* num_operands */, | ||
9935 | TREG_ZERO, /* implicitly_written_register */ | ||
9936 | 1, /* can_bundle */ | ||
9937 | { | ||
9938 | /* operands */ | ||
9939 | { 7, 8, 32 }, | ||
9940 | { 9, 10, 33 }, | ||
9941 | { 11, 12, 34 }, | ||
9942 | { 13, 14, 35 }, | ||
9943 | { 0, } | ||
9944 | }, | ||
9945 | { | ||
9946 | /* fixed_bit_masks */ | ||
9947 | 0x800000007ffe0000ULL, | ||
9948 | 0xffff000000000000ULL, | ||
9949 | 0x80000000780e0000ULL, | ||
9950 | 0xf807000000000000ULL, | ||
9951 | 0ULL | ||
9952 | }, | ||
9953 | { | ||
9954 | /* fixed_bit_values */ | ||
9955 | 0x0000000070140000ULL, | ||
9956 | 0x400a000000000000ULL, | ||
9957 | 0x8000000068080000ULL, | ||
9958 | 0xd804000000000000ULL, | ||
9959 | -1ULL | ||
9960 | } | ||
9961 | }, | ||
9962 | { "srai.sn", TILE_OPC_SRAI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
9963 | TREG_SN, /* implicitly_written_register */ | ||
9964 | 1, /* can_bundle */ | ||
9965 | { | ||
9966 | /* operands */ | ||
9967 | { 7, 8, 32 }, | ||
9968 | { 9, 10, 33 }, | ||
9969 | { 0, }, | ||
9970 | { 0, }, | ||
9971 | { 0, } | ||
9972 | }, | ||
9973 | { | ||
9974 | /* fixed_bit_masks */ | ||
9975 | 0x800000007ffe0000ULL, | ||
9976 | 0xffff000000000000ULL, | ||
9977 | 0ULL, | ||
9978 | 0ULL, | ||
9979 | 0ULL | ||
9980 | }, | ||
9981 | { | ||
9982 | /* fixed_bit_values */ | ||
9983 | 0x0000000078140000ULL, | ||
9984 | 0x440a000000000000ULL, | ||
9985 | -1ULL, | ||
9986 | -1ULL, | ||
9987 | -1ULL | ||
9988 | } | ||
9989 | }, | ||
9990 | { "sraib", TILE_OPC_SRAIB, 0x3 /* pipes */, 3 /* num_operands */, | ||
9991 | TREG_ZERO, /* implicitly_written_register */ | ||
9992 | 1, /* can_bundle */ | ||
9993 | { | ||
9994 | /* operands */ | ||
9995 | { 7, 8, 32 }, | ||
9996 | { 9, 10, 33 }, | ||
9997 | { 0, }, | ||
9998 | { 0, }, | ||
9999 | { 0, } | ||
10000 | }, | ||
10001 | { | ||
10002 | /* fixed_bit_masks */ | ||
10003 | 0x800000007ffe0000ULL, | ||
10004 | 0xffff000000000000ULL, | ||
10005 | 0ULL, | ||
10006 | 0ULL, | ||
10007 | 0ULL | ||
10008 | }, | ||
10009 | { | ||
10010 | /* fixed_bit_values */ | ||
10011 | 0x0000000070100000ULL, | ||
10012 | 0x4008000000000000ULL, | ||
10013 | -1ULL, | ||
10014 | -1ULL, | ||
10015 | -1ULL | ||
10016 | } | ||
10017 | }, | ||
10018 | { "sraib.sn", TILE_OPC_SRAIB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
10019 | TREG_SN, /* implicitly_written_register */ | ||
10020 | 1, /* can_bundle */ | ||
10021 | { | ||
10022 | /* operands */ | ||
10023 | { 7, 8, 32 }, | ||
10024 | { 9, 10, 33 }, | ||
10025 | { 0, }, | ||
10026 | { 0, }, | ||
10027 | { 0, } | ||
10028 | }, | ||
10029 | { | ||
10030 | /* fixed_bit_masks */ | ||
10031 | 0x800000007ffe0000ULL, | ||
10032 | 0xffff000000000000ULL, | ||
10033 | 0ULL, | ||
10034 | 0ULL, | ||
10035 | 0ULL | ||
10036 | }, | ||
10037 | { | ||
10038 | /* fixed_bit_values */ | ||
10039 | 0x0000000078100000ULL, | ||
10040 | 0x4408000000000000ULL, | ||
10041 | -1ULL, | ||
10042 | -1ULL, | ||
10043 | -1ULL | ||
10044 | } | ||
10045 | }, | ||
10046 | { "sraih", TILE_OPC_SRAIH, 0x3 /* pipes */, 3 /* num_operands */, | ||
10047 | TREG_ZERO, /* implicitly_written_register */ | ||
10048 | 1, /* can_bundle */ | ||
10049 | { | ||
10050 | /* operands */ | ||
10051 | { 7, 8, 32 }, | ||
10052 | { 9, 10, 33 }, | ||
10053 | { 0, }, | ||
10054 | { 0, }, | ||
10055 | { 0, } | ||
10056 | }, | ||
10057 | { | ||
10058 | /* fixed_bit_masks */ | ||
10059 | 0x800000007ffe0000ULL, | ||
10060 | 0xffff000000000000ULL, | ||
10061 | 0ULL, | ||
10062 | 0ULL, | ||
10063 | 0ULL | ||
10064 | }, | ||
10065 | { | ||
10066 | /* fixed_bit_values */ | ||
10067 | 0x0000000070120000ULL, | ||
10068 | 0x4009000000000000ULL, | ||
10069 | -1ULL, | ||
10070 | -1ULL, | ||
10071 | -1ULL | ||
10072 | } | ||
10073 | }, | ||
10074 | { "sraih.sn", TILE_OPC_SRAIH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
10075 | TREG_SN, /* implicitly_written_register */ | ||
10076 | 1, /* can_bundle */ | ||
10077 | { | ||
10078 | /* operands */ | ||
10079 | { 7, 8, 32 }, | ||
10080 | { 9, 10, 33 }, | ||
10081 | { 0, }, | ||
10082 | { 0, }, | ||
10083 | { 0, } | ||
10084 | }, | ||
10085 | { | ||
10086 | /* fixed_bit_masks */ | ||
10087 | 0x800000007ffe0000ULL, | ||
10088 | 0xffff000000000000ULL, | ||
10089 | 0ULL, | ||
10090 | 0ULL, | ||
10091 | 0ULL | ||
10092 | }, | ||
10093 | { | ||
10094 | /* fixed_bit_values */ | ||
10095 | 0x0000000078120000ULL, | ||
10096 | 0x4409000000000000ULL, | ||
10097 | -1ULL, | ||
10098 | -1ULL, | ||
10099 | -1ULL | ||
10100 | } | ||
10101 | }, | ||
10102 | { "sub", TILE_OPC_SUB, 0xf /* pipes */, 3 /* num_operands */, | ||
10103 | TREG_ZERO, /* implicitly_written_register */ | ||
10104 | 1, /* can_bundle */ | ||
10105 | { | ||
10106 | /* operands */ | ||
10107 | { 7, 8, 16 }, | ||
10108 | { 9, 10, 17 }, | ||
10109 | { 11, 12, 18 }, | ||
10110 | { 13, 14, 19 }, | ||
10111 | { 0, } | ||
10112 | }, | ||
10113 | { | ||
10114 | /* fixed_bit_masks */ | ||
10115 | 0x800000007ffc0000ULL, | ||
10116 | 0xfffe000000000000ULL, | ||
10117 | 0x80000000780c0000ULL, | ||
10118 | 0xf806000000000000ULL, | ||
10119 | 0ULL | ||
10120 | }, | ||
10121 | { | ||
10122 | /* fixed_bit_values */ | ||
10123 | 0x0000000001740000ULL, | ||
10124 | 0x087e000000000000ULL, | ||
10125 | 0x80000000080c0000ULL, | ||
10126 | 0x8806000000000000ULL, | ||
10127 | -1ULL | ||
10128 | } | ||
10129 | }, | ||
10130 | { "sub.sn", TILE_OPC_SUB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
10131 | TREG_SN, /* implicitly_written_register */ | ||
10132 | 1, /* can_bundle */ | ||
10133 | { | ||
10134 | /* operands */ | ||
10135 | { 7, 8, 16 }, | ||
10136 | { 9, 10, 17 }, | ||
10137 | { 0, }, | ||
10138 | { 0, }, | ||
10139 | { 0, } | ||
10140 | }, | ||
10141 | { | ||
10142 | /* fixed_bit_masks */ | ||
10143 | 0x800000007ffc0000ULL, | ||
10144 | 0xfffe000000000000ULL, | ||
10145 | 0ULL, | ||
10146 | 0ULL, | ||
10147 | 0ULL | ||
10148 | }, | ||
10149 | { | ||
10150 | /* fixed_bit_values */ | ||
10151 | 0x0000000009740000ULL, | ||
10152 | 0x0c7e000000000000ULL, | ||
10153 | -1ULL, | ||
10154 | -1ULL, | ||
10155 | -1ULL | ||
10156 | } | ||
10157 | }, | ||
10158 | { "subb", TILE_OPC_SUBB, 0x3 /* pipes */, 3 /* num_operands */, | ||
10159 | TREG_ZERO, /* implicitly_written_register */ | ||
10160 | 1, /* can_bundle */ | ||
10161 | { | ||
10162 | /* operands */ | ||
10163 | { 7, 8, 16 }, | ||
10164 | { 9, 10, 17 }, | ||
10165 | { 0, }, | ||
10166 | { 0, }, | ||
10167 | { 0, } | ||
10168 | }, | ||
10169 | { | ||
10170 | /* fixed_bit_masks */ | ||
10171 | 0x800000007ffc0000ULL, | ||
10172 | 0xfffe000000000000ULL, | ||
10173 | 0ULL, | ||
10174 | 0ULL, | ||
10175 | 0ULL | ||
10176 | }, | ||
10177 | { | ||
10178 | /* fixed_bit_values */ | ||
10179 | 0x00000000016c0000ULL, | ||
10180 | 0x087a000000000000ULL, | ||
10181 | -1ULL, | ||
10182 | -1ULL, | ||
10183 | -1ULL | ||
10184 | } | ||
10185 | }, | ||
10186 | { "subb.sn", TILE_OPC_SUBB_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
10187 | TREG_SN, /* implicitly_written_register */ | ||
10188 | 1, /* can_bundle */ | ||
10189 | { | ||
10190 | /* operands */ | ||
10191 | { 7, 8, 16 }, | ||
10192 | { 9, 10, 17 }, | ||
10193 | { 0, }, | ||
10194 | { 0, }, | ||
10195 | { 0, } | ||
10196 | }, | ||
10197 | { | ||
10198 | /* fixed_bit_masks */ | ||
10199 | 0x800000007ffc0000ULL, | ||
10200 | 0xfffe000000000000ULL, | ||
10201 | 0ULL, | ||
10202 | 0ULL, | ||
10203 | 0ULL | ||
10204 | }, | ||
10205 | { | ||
10206 | /* fixed_bit_values */ | ||
10207 | 0x00000000096c0000ULL, | ||
10208 | 0x0c7a000000000000ULL, | ||
10209 | -1ULL, | ||
10210 | -1ULL, | ||
10211 | -1ULL | ||
10212 | } | ||
10213 | }, | ||
10214 | { "subbs_u", TILE_OPC_SUBBS_U, 0x3 /* pipes */, 3 /* num_operands */, | ||
10215 | TREG_ZERO, /* implicitly_written_register */ | ||
10216 | 1, /* can_bundle */ | ||
10217 | { | ||
10218 | /* operands */ | ||
10219 | { 7, 8, 16 }, | ||
10220 | { 9, 10, 17 }, | ||
10221 | { 0, }, | ||
10222 | { 0, }, | ||
10223 | { 0, } | ||
10224 | }, | ||
10225 | { | ||
10226 | /* fixed_bit_masks */ | ||
10227 | 0x800000007ffc0000ULL, | ||
10228 | 0xfffe000000000000ULL, | ||
10229 | 0ULL, | ||
10230 | 0ULL, | ||
10231 | 0ULL | ||
10232 | }, | ||
10233 | { | ||
10234 | /* fixed_bit_values */ | ||
10235 | 0x0000000001900000ULL, | ||
10236 | 0x088c000000000000ULL, | ||
10237 | -1ULL, | ||
10238 | -1ULL, | ||
10239 | -1ULL | ||
10240 | } | ||
10241 | }, | ||
10242 | { "subbs_u.sn", TILE_OPC_SUBBS_U_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
10243 | TREG_SN, /* implicitly_written_register */ | ||
10244 | 1, /* can_bundle */ | ||
10245 | { | ||
10246 | /* operands */ | ||
10247 | { 7, 8, 16 }, | ||
10248 | { 9, 10, 17 }, | ||
10249 | { 0, }, | ||
10250 | { 0, }, | ||
10251 | { 0, } | ||
10252 | }, | ||
10253 | { | ||
10254 | /* fixed_bit_masks */ | ||
10255 | 0x800000007ffc0000ULL, | ||
10256 | 0xfffe000000000000ULL, | ||
10257 | 0ULL, | ||
10258 | 0ULL, | ||
10259 | 0ULL | ||
10260 | }, | ||
10261 | { | ||
10262 | /* fixed_bit_values */ | ||
10263 | 0x0000000009900000ULL, | ||
10264 | 0x0c8c000000000000ULL, | ||
10265 | -1ULL, | ||
10266 | -1ULL, | ||
10267 | -1ULL | ||
10268 | } | ||
10269 | }, | ||
10270 | { "subh", TILE_OPC_SUBH, 0x3 /* pipes */, 3 /* num_operands */, | ||
10271 | TREG_ZERO, /* implicitly_written_register */ | ||
10272 | 1, /* can_bundle */ | ||
10273 | { | ||
10274 | /* operands */ | ||
10275 | { 7, 8, 16 }, | ||
10276 | { 9, 10, 17 }, | ||
10277 | { 0, }, | ||
10278 | { 0, }, | ||
10279 | { 0, } | ||
10280 | }, | ||
10281 | { | ||
10282 | /* fixed_bit_masks */ | ||
10283 | 0x800000007ffc0000ULL, | ||
10284 | 0xfffe000000000000ULL, | ||
10285 | 0ULL, | ||
10286 | 0ULL, | ||
10287 | 0ULL | ||
10288 | }, | ||
10289 | { | ||
10290 | /* fixed_bit_values */ | ||
10291 | 0x0000000001700000ULL, | ||
10292 | 0x087c000000000000ULL, | ||
10293 | -1ULL, | ||
10294 | -1ULL, | ||
10295 | -1ULL | ||
10296 | } | ||
10297 | }, | ||
10298 | { "subh.sn", TILE_OPC_SUBH_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
10299 | TREG_SN, /* implicitly_written_register */ | ||
10300 | 1, /* can_bundle */ | ||
10301 | { | ||
10302 | /* operands */ | ||
10303 | { 7, 8, 16 }, | ||
10304 | { 9, 10, 17 }, | ||
10305 | { 0, }, | ||
10306 | { 0, }, | ||
10307 | { 0, } | ||
10308 | }, | ||
10309 | { | ||
10310 | /* fixed_bit_masks */ | ||
10311 | 0x800000007ffc0000ULL, | ||
10312 | 0xfffe000000000000ULL, | ||
10313 | 0ULL, | ||
10314 | 0ULL, | ||
10315 | 0ULL | ||
10316 | }, | ||
10317 | { | ||
10318 | /* fixed_bit_values */ | ||
10319 | 0x0000000009700000ULL, | ||
10320 | 0x0c7c000000000000ULL, | ||
10321 | -1ULL, | ||
10322 | -1ULL, | ||
10323 | -1ULL | ||
10324 | } | ||
10325 | }, | ||
10326 | { "subhs", TILE_OPC_SUBHS, 0x3 /* pipes */, 3 /* num_operands */, | ||
10327 | TREG_ZERO, /* implicitly_written_register */ | ||
10328 | 1, /* can_bundle */ | ||
10329 | { | ||
10330 | /* operands */ | ||
10331 | { 7, 8, 16 }, | ||
10332 | { 9, 10, 17 }, | ||
10333 | { 0, }, | ||
10334 | { 0, }, | ||
10335 | { 0, } | ||
10336 | }, | ||
10337 | { | ||
10338 | /* fixed_bit_masks */ | ||
10339 | 0x800000007ffc0000ULL, | ||
10340 | 0xfffe000000000000ULL, | ||
10341 | 0ULL, | ||
10342 | 0ULL, | ||
10343 | 0ULL | ||
10344 | }, | ||
10345 | { | ||
10346 | /* fixed_bit_values */ | ||
10347 | 0x0000000001940000ULL, | ||
10348 | 0x088e000000000000ULL, | ||
10349 | -1ULL, | ||
10350 | -1ULL, | ||
10351 | -1ULL | ||
10352 | } | ||
10353 | }, | ||
10354 | { "subhs.sn", TILE_OPC_SUBHS_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
10355 | TREG_SN, /* implicitly_written_register */ | ||
10356 | 1, /* can_bundle */ | ||
10357 | { | ||
10358 | /* operands */ | ||
10359 | { 7, 8, 16 }, | ||
10360 | { 9, 10, 17 }, | ||
10361 | { 0, }, | ||
10362 | { 0, }, | ||
10363 | { 0, } | ||
10364 | }, | ||
10365 | { | ||
10366 | /* fixed_bit_masks */ | ||
10367 | 0x800000007ffc0000ULL, | ||
10368 | 0xfffe000000000000ULL, | ||
10369 | 0ULL, | ||
10370 | 0ULL, | ||
10371 | 0ULL | ||
10372 | }, | ||
10373 | { | ||
10374 | /* fixed_bit_values */ | ||
10375 | 0x0000000009940000ULL, | ||
10376 | 0x0c8e000000000000ULL, | ||
10377 | -1ULL, | ||
10378 | -1ULL, | ||
10379 | -1ULL | ||
10380 | } | ||
10381 | }, | ||
10382 | { "subs", TILE_OPC_SUBS, 0x3 /* pipes */, 3 /* num_operands */, | ||
10383 | TREG_ZERO, /* implicitly_written_register */ | ||
10384 | 1, /* can_bundle */ | ||
10385 | { | ||
10386 | /* operands */ | ||
10387 | { 7, 8, 16 }, | ||
10388 | { 9, 10, 17 }, | ||
10389 | { 0, }, | ||
10390 | { 0, }, | ||
10391 | { 0, } | ||
10392 | }, | ||
10393 | { | ||
10394 | /* fixed_bit_masks */ | ||
10395 | 0x800000007ffc0000ULL, | ||
10396 | 0xfffe000000000000ULL, | ||
10397 | 0ULL, | ||
10398 | 0ULL, | ||
10399 | 0ULL | ||
10400 | }, | ||
10401 | { | ||
10402 | /* fixed_bit_values */ | ||
10403 | 0x0000000001840000ULL, | ||
10404 | 0x0886000000000000ULL, | ||
10405 | -1ULL, | ||
10406 | -1ULL, | ||
10407 | -1ULL | ||
10408 | } | ||
10409 | }, | ||
10410 | { "subs.sn", TILE_OPC_SUBS_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
10411 | TREG_SN, /* implicitly_written_register */ | ||
10412 | 1, /* can_bundle */ | ||
10413 | { | ||
10414 | /* operands */ | ||
10415 | { 7, 8, 16 }, | ||
10416 | { 9, 10, 17 }, | ||
10417 | { 0, }, | ||
10418 | { 0, }, | ||
10419 | { 0, } | ||
10420 | }, | ||
10421 | { | ||
10422 | /* fixed_bit_masks */ | ||
10423 | 0x800000007ffc0000ULL, | ||
10424 | 0xfffe000000000000ULL, | ||
10425 | 0ULL, | ||
10426 | 0ULL, | ||
10427 | 0ULL | ||
10428 | }, | ||
10429 | { | ||
10430 | /* fixed_bit_values */ | ||
10431 | 0x0000000009840000ULL, | ||
10432 | 0x0c86000000000000ULL, | ||
10433 | -1ULL, | ||
10434 | -1ULL, | ||
10435 | -1ULL | ||
10436 | } | ||
10437 | }, | ||
10438 | { "sw", TILE_OPC_SW, 0x12 /* pipes */, 2 /* num_operands */, | ||
10439 | TREG_ZERO, /* implicitly_written_register */ | ||
10440 | 1, /* can_bundle */ | ||
10441 | { | ||
10442 | /* operands */ | ||
10443 | { 0, }, | ||
10444 | { 10, 17 }, | ||
10445 | { 0, }, | ||
10446 | { 0, }, | ||
10447 | { 15, 36 } | ||
10448 | }, | ||
10449 | { | ||
10450 | /* fixed_bit_masks */ | ||
10451 | 0ULL, | ||
10452 | 0xfbfe000000000000ULL, | ||
10453 | 0ULL, | ||
10454 | 0ULL, | ||
10455 | 0x8700000000000000ULL | ||
10456 | }, | ||
10457 | { | ||
10458 | /* fixed_bit_values */ | ||
10459 | -1ULL, | ||
10460 | 0x0880000000000000ULL, | ||
10461 | -1ULL, | ||
10462 | -1ULL, | ||
10463 | 0x8700000000000000ULL | ||
10464 | } | ||
10465 | }, | ||
10466 | { "swadd", TILE_OPC_SWADD, 0x2 /* pipes */, 3 /* num_operands */, | ||
10467 | TREG_ZERO, /* implicitly_written_register */ | ||
10468 | 1, /* can_bundle */ | ||
10469 | { | ||
10470 | /* operands */ | ||
10471 | { 0, }, | ||
10472 | { 24, 17, 37 }, | ||
10473 | { 0, }, | ||
10474 | { 0, }, | ||
10475 | { 0, } | ||
10476 | }, | ||
10477 | { | ||
10478 | /* fixed_bit_masks */ | ||
10479 | 0ULL, | ||
10480 | 0xfbf8000000000000ULL, | ||
10481 | 0ULL, | ||
10482 | 0ULL, | ||
10483 | 0ULL | ||
10484 | }, | ||
10485 | { | ||
10486 | /* fixed_bit_values */ | ||
10487 | -1ULL, | ||
10488 | 0x30f0000000000000ULL, | ||
10489 | -1ULL, | ||
10490 | -1ULL, | ||
10491 | -1ULL | ||
10492 | } | ||
10493 | }, | ||
10494 | { "swint0", TILE_OPC_SWINT0, 0x2 /* pipes */, 0 /* num_operands */, | ||
10495 | TREG_ZERO, /* implicitly_written_register */ | ||
10496 | 0, /* can_bundle */ | ||
10497 | { | ||
10498 | /* operands */ | ||
10499 | { 0, }, | ||
10500 | { }, | ||
10501 | { 0, }, | ||
10502 | { 0, }, | ||
10503 | { 0, } | ||
10504 | }, | ||
10505 | { | ||
10506 | /* fixed_bit_masks */ | ||
10507 | 0ULL, | ||
10508 | 0xfbfff80000000000ULL, | ||
10509 | 0ULL, | ||
10510 | 0ULL, | ||
10511 | 0ULL | ||
10512 | }, | ||
10513 | { | ||
10514 | /* fixed_bit_values */ | ||
10515 | -1ULL, | ||
10516 | 0x400b900000000000ULL, | ||
10517 | -1ULL, | ||
10518 | -1ULL, | ||
10519 | -1ULL | ||
10520 | } | ||
10521 | }, | ||
10522 | { "swint1", TILE_OPC_SWINT1, 0x2 /* pipes */, 0 /* num_operands */, | ||
10523 | TREG_ZERO, /* implicitly_written_register */ | ||
10524 | 0, /* can_bundle */ | ||
10525 | { | ||
10526 | /* operands */ | ||
10527 | { 0, }, | ||
10528 | { }, | ||
10529 | { 0, }, | ||
10530 | { 0, }, | ||
10531 | { 0, } | ||
10532 | }, | ||
10533 | { | ||
10534 | /* fixed_bit_masks */ | ||
10535 | 0ULL, | ||
10536 | 0xfbfff80000000000ULL, | ||
10537 | 0ULL, | ||
10538 | 0ULL, | ||
10539 | 0ULL | ||
10540 | }, | ||
10541 | { | ||
10542 | /* fixed_bit_values */ | ||
10543 | -1ULL, | ||
10544 | 0x400b980000000000ULL, | ||
10545 | -1ULL, | ||
10546 | -1ULL, | ||
10547 | -1ULL | ||
10548 | } | ||
10549 | }, | ||
10550 | { "swint2", TILE_OPC_SWINT2, 0x2 /* pipes */, 0 /* num_operands */, | ||
10551 | TREG_ZERO, /* implicitly_written_register */ | ||
10552 | 0, /* can_bundle */ | ||
10553 | { | ||
10554 | /* operands */ | ||
10555 | { 0, }, | ||
10556 | { }, | ||
10557 | { 0, }, | ||
10558 | { 0, }, | ||
10559 | { 0, } | ||
10560 | }, | ||
10561 | { | ||
10562 | /* fixed_bit_masks */ | ||
10563 | 0ULL, | ||
10564 | 0xfbfff80000000000ULL, | ||
10565 | 0ULL, | ||
10566 | 0ULL, | ||
10567 | 0ULL | ||
10568 | }, | ||
10569 | { | ||
10570 | /* fixed_bit_values */ | ||
10571 | -1ULL, | ||
10572 | 0x400ba00000000000ULL, | ||
10573 | -1ULL, | ||
10574 | -1ULL, | ||
10575 | -1ULL | ||
10576 | } | ||
10577 | }, | ||
10578 | { "swint3", TILE_OPC_SWINT3, 0x2 /* pipes */, 0 /* num_operands */, | ||
10579 | TREG_ZERO, /* implicitly_written_register */ | ||
10580 | 0, /* can_bundle */ | ||
10581 | { | ||
10582 | /* operands */ | ||
10583 | { 0, }, | ||
10584 | { }, | ||
10585 | { 0, }, | ||
10586 | { 0, }, | ||
10587 | { 0, } | ||
10588 | }, | ||
10589 | { | ||
10590 | /* fixed_bit_masks */ | ||
10591 | 0ULL, | ||
10592 | 0xfbfff80000000000ULL, | ||
10593 | 0ULL, | ||
10594 | 0ULL, | ||
10595 | 0ULL | ||
10596 | }, | ||
10597 | { | ||
10598 | /* fixed_bit_values */ | ||
10599 | -1ULL, | ||
10600 | 0x400ba80000000000ULL, | ||
10601 | -1ULL, | ||
10602 | -1ULL, | ||
10603 | -1ULL | ||
10604 | } | ||
10605 | }, | ||
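A small follow-on sketch, again as an assumption drawn from the data rather than the port's actual decoder: the "pipes" value in each entry appears to be a bit mask of the bundle slots the opcode may issue in, e.g. 0x2 for swint0-swint3 (slot 1 only) and 0x5 for the tblidxb* forms below (slots 0 and 2), matching the slots whose masks are non-zero. The walk below reuses the hypothetical struct and match_in_slot() helper from the earlier sketch.

/*
 * Illustration only, building on the earlier sketch.  Returns the first
 * slot of the bundle in which this opcode's fixed bits match, or -1 if it
 * is not present in the bundle.
 */
static int find_matching_slot(const struct sketch_opcode *opc,
			      unsigned int pipes, uint64_t bundle)
{
	int slot;

	for (slot = 0; slot < SKETCH_NUM_SLOTS; slot++) {
		/* Skip slots this opcode is not allowed to issue in. */
		if (!((pipes >> slot) & 1))
			continue;
		if (match_in_slot(opc, bundle, slot))
			return slot;
	}
	return -1;
}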
10606 | { "tblidxb0", TILE_OPC_TBLIDXB0, 0x5 /* pipes */, 2 /* num_operands */, | ||
10607 | TREG_ZERO, /* implicitly_written_register */ | ||
10608 | 1, /* can_bundle */ | ||
10609 | { | ||
10610 | /* operands */ | ||
10611 | { 21, 8 }, | ||
10612 | { 0, }, | ||
10613 | { 31, 12 }, | ||
10614 | { 0, }, | ||
10615 | { 0, } | ||
10616 | }, | ||
10617 | { | ||
10618 | /* fixed_bit_masks */ | ||
10619 | 0x800000007ffff000ULL, | ||
10620 | 0ULL, | ||
10621 | 0x80000000780ff000ULL, | ||
10622 | 0ULL, | ||
10623 | 0ULL | ||
10624 | }, | ||
10625 | { | ||
10626 | /* fixed_bit_values */ | ||
10627 | 0x0000000070168000ULL, | ||
10628 | -1ULL, | ||
10629 | 0x80000000680a8000ULL, | ||
10630 | -1ULL, | ||
10631 | -1ULL | ||
10632 | } | ||
10633 | }, | ||
10634 | { "tblidxb0.sn", TILE_OPC_TBLIDXB0_SN, 0x1 /* pipes */, 2 /* num_operands */, | ||
10635 | TREG_SN, /* implicitly_written_register */ | ||
10636 | 1, /* can_bundle */ | ||
10637 | { | ||
10638 | /* operands */ | ||
10639 | { 21, 8 }, | ||
10640 | { 0, }, | ||
10641 | { 0, }, | ||
10642 | { 0, }, | ||
10643 | { 0, } | ||
10644 | }, | ||
10645 | { | ||
10646 | /* fixed_bit_masks */ | ||
10647 | 0x800000007ffff000ULL, | ||
10648 | 0ULL, | ||
10649 | 0ULL, | ||
10650 | 0ULL, | ||
10651 | 0ULL | ||
10652 | }, | ||
10653 | { | ||
10654 | /* fixed_bit_values */ | ||
10655 | 0x0000000078168000ULL, | ||
10656 | -1ULL, | ||
10657 | -1ULL, | ||
10658 | -1ULL, | ||
10659 | -1ULL | ||
10660 | } | ||
10661 | }, | ||
10662 | { "tblidxb1", TILE_OPC_TBLIDXB1, 0x5 /* pipes */, 2 /* num_operands */, | ||
10663 | TREG_ZERO, /* implicitly_written_register */ | ||
10664 | 1, /* can_bundle */ | ||
10665 | { | ||
10666 | /* operands */ | ||
10667 | { 21, 8 }, | ||
10668 | { 0, }, | ||
10669 | { 31, 12 }, | ||
10670 | { 0, }, | ||
10671 | { 0, } | ||
10672 | }, | ||
10673 | { | ||
10674 | /* fixed_bit_masks */ | ||
10675 | 0x800000007ffff000ULL, | ||
10676 | 0ULL, | ||
10677 | 0x80000000780ff000ULL, | ||
10678 | 0ULL, | ||
10679 | 0ULL | ||
10680 | }, | ||
10681 | { | ||
10682 | /* fixed_bit_values */ | ||
10683 | 0x0000000070169000ULL, | ||
10684 | -1ULL, | ||
10685 | 0x80000000680a9000ULL, | ||
10686 | -1ULL, | ||
10687 | -1ULL | ||
10688 | } | ||
10689 | }, | ||
10690 | { "tblidxb1.sn", TILE_OPC_TBLIDXB1_SN, 0x1 /* pipes */, 2 /* num_operands */, | ||
10691 | TREG_SN, /* implicitly_written_register */ | ||
10692 | 1, /* can_bundle */ | ||
10693 | { | ||
10694 | /* operands */ | ||
10695 | { 21, 8 }, | ||
10696 | { 0, }, | ||
10697 | { 0, }, | ||
10698 | { 0, }, | ||
10699 | { 0, } | ||
10700 | }, | ||
10701 | { | ||
10702 | /* fixed_bit_masks */ | ||
10703 | 0x800000007ffff000ULL, | ||
10704 | 0ULL, | ||
10705 | 0ULL, | ||
10706 | 0ULL, | ||
10707 | 0ULL | ||
10708 | }, | ||
10709 | { | ||
10710 | /* fixed_bit_values */ | ||
10711 | 0x0000000078169000ULL, | ||
10712 | -1ULL, | ||
10713 | -1ULL, | ||
10714 | -1ULL, | ||
10715 | -1ULL | ||
10716 | } | ||
10717 | }, | ||
10718 | { "tblidxb2", TILE_OPC_TBLIDXB2, 0x5 /* pipes */, 2 /* num_operands */, | ||
10719 | TREG_ZERO, /* implicitly_written_register */ | ||
10720 | 1, /* can_bundle */ | ||
10721 | { | ||
10722 | /* operands */ | ||
10723 | { 21, 8 }, | ||
10724 | { 0, }, | ||
10725 | { 31, 12 }, | ||
10726 | { 0, }, | ||
10727 | { 0, } | ||
10728 | }, | ||
10729 | { | ||
10730 | /* fixed_bit_masks */ | ||
10731 | 0x800000007ffff000ULL, | ||
10732 | 0ULL, | ||
10733 | 0x80000000780ff000ULL, | ||
10734 | 0ULL, | ||
10735 | 0ULL | ||
10736 | }, | ||
10737 | { | ||
10738 | /* fixed_bit_values */ | ||
10739 | 0x000000007016a000ULL, | ||
10740 | -1ULL, | ||
10741 | 0x80000000680aa000ULL, | ||
10742 | -1ULL, | ||
10743 | -1ULL | ||
10744 | } | ||
10745 | }, | ||
10746 | { "tblidxb2.sn", TILE_OPC_TBLIDXB2_SN, 0x1 /* pipes */, 2 /* num_operands */, | ||
10747 | TREG_SN, /* implicitly_written_register */ | ||
10748 | 1, /* can_bundle */ | ||
10749 | { | ||
10750 | /* operands */ | ||
10751 | { 21, 8 }, | ||
10752 | { 0, }, | ||
10753 | { 0, }, | ||
10754 | { 0, }, | ||
10755 | { 0, } | ||
10756 | }, | ||
10757 | { | ||
10758 | /* fixed_bit_masks */ | ||
10759 | 0x800000007ffff000ULL, | ||
10760 | 0ULL, | ||
10761 | 0ULL, | ||
10762 | 0ULL, | ||
10763 | 0ULL | ||
10764 | }, | ||
10765 | { | ||
10766 | /* fixed_bit_values */ | ||
10767 | 0x000000007816a000ULL, | ||
10768 | -1ULL, | ||
10769 | -1ULL, | ||
10770 | -1ULL, | ||
10771 | -1ULL | ||
10772 | } | ||
10773 | }, | ||
10774 | { "tblidxb3", TILE_OPC_TBLIDXB3, 0x5 /* pipes */, 2 /* num_operands */, | ||
10775 | TREG_ZERO, /* implicitly_written_register */ | ||
10776 | 1, /* can_bundle */ | ||
10777 | { | ||
10778 | /* operands */ | ||
10779 | { 21, 8 }, | ||
10780 | { 0, }, | ||
10781 | { 31, 12 }, | ||
10782 | { 0, }, | ||
10783 | { 0, } | ||
10784 | }, | ||
10785 | { | ||
10786 | /* fixed_bit_masks */ | ||
10787 | 0x800000007ffff000ULL, | ||
10788 | 0ULL, | ||
10789 | 0x80000000780ff000ULL, | ||
10790 | 0ULL, | ||
10791 | 0ULL | ||
10792 | }, | ||
10793 | { | ||
10794 | /* fixed_bit_values */ | ||
10795 | 0x000000007016b000ULL, | ||
10796 | -1ULL, | ||
10797 | 0x80000000680ab000ULL, | ||
10798 | -1ULL, | ||
10799 | -1ULL | ||
10800 | } | ||
10801 | }, | ||
10802 | { "tblidxb3.sn", TILE_OPC_TBLIDXB3_SN, 0x1 /* pipes */, 2 /* num_operands */, | ||
10803 | TREG_SN, /* implicitly_written_register */ | ||
10804 | 1, /* can_bundle */ | ||
10805 | { | ||
10806 | /* operands */ | ||
10807 | { 21, 8 }, | ||
10808 | { 0, }, | ||
10809 | { 0, }, | ||
10810 | { 0, }, | ||
10811 | { 0, } | ||
10812 | }, | ||
10813 | { | ||
10814 | /* fixed_bit_masks */ | ||
10815 | 0x800000007ffff000ULL, | ||
10816 | 0ULL, | ||
10817 | 0ULL, | ||
10818 | 0ULL, | ||
10819 | 0ULL | ||
10820 | }, | ||
10821 | { | ||
10822 | /* fixed_bit_values */ | ||
10823 | 0x000000007816b000ULL, | ||
10824 | -1ULL, | ||
10825 | -1ULL, | ||
10826 | -1ULL, | ||
10827 | -1ULL | ||
10828 | } | ||
10829 | }, | ||
10830 | { "tns", TILE_OPC_TNS, 0x2 /* pipes */, 2 /* num_operands */, | ||
10831 | TREG_ZERO, /* implicitly_written_register */ | ||
10832 | 1, /* can_bundle */ | ||
10833 | { | ||
10834 | /* operands */ | ||
10835 | { 0, }, | ||
10836 | { 9, 10 }, | ||
10837 | { 0, }, | ||
10838 | { 0, }, | ||
10839 | { 0, } | ||
10840 | }, | ||
10841 | { | ||
10842 | /* fixed_bit_masks */ | ||
10843 | 0ULL, | ||
10844 | 0xfffff80000000000ULL, | ||
10845 | 0ULL, | ||
10846 | 0ULL, | ||
10847 | 0ULL | ||
10848 | }, | ||
10849 | { | ||
10850 | /* fixed_bit_values */ | ||
10851 | -1ULL, | ||
10852 | 0x400bb00000000000ULL, | ||
10853 | -1ULL, | ||
10854 | -1ULL, | ||
10855 | -1ULL | ||
10856 | } | ||
10857 | }, | ||
10858 | { "tns.sn", TILE_OPC_TNS_SN, 0x2 /* pipes */, 2 /* num_operands */, | ||
10859 | TREG_SN, /* implicitly_written_register */ | ||
10860 | 1, /* can_bundle */ | ||
10861 | { | ||
10862 | /* operands */ | ||
10863 | { 0, }, | ||
10864 | { 9, 10 }, | ||
10865 | { 0, }, | ||
10866 | { 0, }, | ||
10867 | { 0, } | ||
10868 | }, | ||
10869 | { | ||
10870 | /* fixed_bit_masks */ | ||
10871 | 0ULL, | ||
10872 | 0xfffff80000000000ULL, | ||
10873 | 0ULL, | ||
10874 | 0ULL, | ||
10875 | 0ULL | ||
10876 | }, | ||
10877 | { | ||
10878 | /* fixed_bit_values */ | ||
10879 | -1ULL, | ||
10880 | 0x440bb00000000000ULL, | ||
10881 | -1ULL, | ||
10882 | -1ULL, | ||
10883 | -1ULL | ||
10884 | } | ||
10885 | }, | ||
10886 | { "wh64", TILE_OPC_WH64, 0x2 /* pipes */, 1 /* num_operands */, | ||
10887 | TREG_ZERO, /* implicitly_written_register */ | ||
10888 | 1, /* can_bundle */ | ||
10889 | { | ||
10890 | /* operands */ | ||
10891 | { 0, }, | ||
10892 | { 10 }, | ||
10893 | { 0, }, | ||
10894 | { 0, }, | ||
10895 | { 0, } | ||
10896 | }, | ||
10897 | { | ||
10898 | /* fixed_bit_masks */ | ||
10899 | 0ULL, | ||
10900 | 0xfbfff80000000000ULL, | ||
10901 | 0ULL, | ||
10902 | 0ULL, | ||
10903 | 0ULL | ||
10904 | }, | ||
10905 | { | ||
10906 | /* fixed_bit_values */ | ||
10907 | -1ULL, | ||
10908 | 0x400bb80000000000ULL, | ||
10909 | -1ULL, | ||
10910 | -1ULL, | ||
10911 | -1ULL | ||
10912 | } | ||
10913 | }, | ||
10914 | { "xor", TILE_OPC_XOR, 0xf /* pipes */, 3 /* num_operands */, | ||
10915 | TREG_ZERO, /* implicitly_written_register */ | ||
10916 | 1, /* can_bundle */ | ||
10917 | { | ||
10918 | /* operands */ | ||
10919 | { 7, 8, 16 }, | ||
10920 | { 9, 10, 17 }, | ||
10921 | { 11, 12, 18 }, | ||
10922 | { 13, 14, 19 }, | ||
10923 | { 0, } | ||
10924 | }, | ||
10925 | { | ||
10926 | /* fixed_bit_masks */ | ||
10927 | 0x800000007ffc0000ULL, | ||
10928 | 0xfffe000000000000ULL, | ||
10929 | 0x80000000780c0000ULL, | ||
10930 | 0xf806000000000000ULL, | ||
10931 | 0ULL | ||
10932 | }, | ||
10933 | { | ||
10934 | /* fixed_bit_values */ | ||
10935 | 0x0000000001780000ULL, | ||
10936 | 0x0882000000000000ULL, | ||
10937 | 0x80000000180c0000ULL, | ||
10938 | 0x9806000000000000ULL, | ||
10939 | -1ULL | ||
10940 | } | ||
10941 | }, | ||
10942 | { "xor.sn", TILE_OPC_XOR_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
10943 | TREG_SN, /* implicitly_written_register */ | ||
10944 | 1, /* can_bundle */ | ||
10945 | { | ||
10946 | /* operands */ | ||
10947 | { 7, 8, 16 }, | ||
10948 | { 9, 10, 17 }, | ||
10949 | { 0, }, | ||
10950 | { 0, }, | ||
10951 | { 0, } | ||
10952 | }, | ||
10953 | { | ||
10954 | /* fixed_bit_masks */ | ||
10955 | 0x800000007ffc0000ULL, | ||
10956 | 0xfffe000000000000ULL, | ||
10957 | 0ULL, | ||
10958 | 0ULL, | ||
10959 | 0ULL | ||
10960 | }, | ||
10961 | { | ||
10962 | /* fixed_bit_values */ | ||
10963 | 0x0000000009780000ULL, | ||
10964 | 0x0c82000000000000ULL, | ||
10965 | -1ULL, | ||
10966 | -1ULL, | ||
10967 | -1ULL | ||
10968 | } | ||
10969 | }, | ||
10970 | { "xori", TILE_OPC_XORI, 0x3 /* pipes */, 3 /* num_operands */, | ||
10971 | TREG_ZERO, /* implicitly_written_register */ | ||
10972 | 1, /* can_bundle */ | ||
10973 | { | ||
10974 | /* operands */ | ||
10975 | { 7, 8, 0 }, | ||
10976 | { 9, 10, 1 }, | ||
10977 | { 0, }, | ||
10978 | { 0, }, | ||
10979 | { 0, } | ||
10980 | }, | ||
10981 | { | ||
10982 | /* fixed_bit_masks */ | ||
10983 | 0x800000007ff00000ULL, | ||
10984 | 0xfff8000000000000ULL, | ||
10985 | 0ULL, | ||
10986 | 0ULL, | ||
10987 | 0ULL | ||
10988 | }, | ||
10989 | { | ||
10990 | /* fixed_bit_values */ | ||
10991 | 0x0000000050200000ULL, | ||
10992 | 0x30a8000000000000ULL, | ||
10993 | -1ULL, | ||
10994 | -1ULL, | ||
10995 | -1ULL | ||
10996 | } | ||
10997 | }, | ||
10998 | { "xori.sn", TILE_OPC_XORI_SN, 0x3 /* pipes */, 3 /* num_operands */, | ||
10999 | TREG_SN, /* implicitly_written_register */ | ||
11000 | 1, /* can_bundle */ | ||
11001 | { | ||
11002 | /* operands */ | ||
11003 | { 7, 8, 0 }, | ||
11004 | { 9, 10, 1 }, | ||
11005 | { 0, }, | ||
11006 | { 0, }, | ||
11007 | { 0, } | ||
11008 | }, | ||
11009 | { | ||
11010 | /* fixed_bit_masks */ | ||
11011 | 0x800000007ff00000ULL, | ||
11012 | 0xfff8000000000000ULL, | ||
11013 | 0ULL, | ||
11014 | 0ULL, | ||
11015 | 0ULL | ||
11016 | }, | ||
11017 | { | ||
11018 | /* fixed_bit_values */ | ||
11019 | 0x0000000058200000ULL, | ||
11020 | 0x34a8000000000000ULL, | ||
11021 | -1ULL, | ||
11022 | -1ULL, | ||
11023 | -1ULL | ||
11024 | } | ||
11025 | }, | ||
11026 | { 0, TILE_OPC_NONE, 0, 0, 0, TREG_ZERO, { { 0, } }, { 0, }, { 0, } | ||
11027 | } | ||
11028 | }; | ||
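Each entry in the table above pairs a per-pipeline fixed_bit_masks[] array with a matching fixed_bit_values[] array: a pipeline whose mask is 0 (and value -1ULL) cannot issue that opcode, and otherwise a bundle encodes the opcode in that pipeline exactly when the masked bundle bits equal the fixed value. A minimal sketch of that check, assuming the struct tile_opcode field names implied by the comments in the table (opcode_matches_pipe is a hypothetical helper, not part of this patch):

/* Hypothetical helper: does 'bundle' encode 'op' in pipeline 'pipe'?
 * Assumes fixed_bit_masks[]/fixed_bit_values[] are laid out as in the
 * table above; a zero mask means the opcode cannot appear in that pipe. */
static int opcode_matches_pipe(const struct tile_opcode *op,
			       unsigned long long bundle, int pipe)
{
	unsigned long long mask = op->fixed_bit_masks[pipe];

	return mask != 0 && (bundle & mask) == op->fixed_bit_values[pipe];
}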
11029 | #define BITFIELD(start, size) ((start) | (((1 << (size)) - 1) << 6)) | ||
11030 | #define CHILD(array_index) (TILE_OPC_NONE + (array_index)) | ||
11031 | |||
11032 | static const unsigned short decode_X0_fsm[1153] = | ||
11033 | { | ||
11034 | BITFIELD(22, 9) /* index 0 */, | ||
11035 | CHILD(513), CHILD(530), CHILD(547), CHILD(564), CHILD(596), CHILD(613), | ||
11036 | CHILD(630), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11037 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11038 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11039 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11040 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11041 | TILE_OPC_NONE, CHILD(663), CHILD(680), CHILD(697), CHILD(714), CHILD(746), | ||
11042 | CHILD(763), CHILD(780), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11043 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11044 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11045 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11046 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11047 | TILE_OPC_NONE, TILE_OPC_NONE, CHILD(813), CHILD(813), CHILD(813), | ||
11048 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
11049 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
11050 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
11051 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
11052 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
11053 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
11054 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
11055 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
11056 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
11057 | CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), CHILD(813), | ||
11058 | CHILD(813), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
11059 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
11060 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
11061 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
11062 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
11063 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
11064 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
11065 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
11066 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
11067 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), | ||
11068 | CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(828), CHILD(843), | ||
11069 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11070 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11071 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11072 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11073 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11074 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11075 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11076 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11077 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11078 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11079 | CHILD(843), CHILD(843), CHILD(843), CHILD(873), CHILD(878), CHILD(883), | ||
11080 | CHILD(903), CHILD(908), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11081 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11082 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11083 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11084 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11085 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(913), | ||
11086 | CHILD(918), CHILD(923), CHILD(943), CHILD(948), TILE_OPC_NONE, | ||
11087 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11088 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11089 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11090 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11091 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11092 | TILE_OPC_NONE, CHILD(953), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11093 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11094 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11095 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11096 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11097 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11098 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(988), TILE_OPC_NONE, | ||
11099 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11100 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11101 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11102 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11103 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11104 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11105 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11106 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11107 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11108 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11109 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11110 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11111 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11112 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11113 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11114 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11115 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11116 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11117 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, CHILD(993), | ||
11118 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11119 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11120 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11121 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11122 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11123 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11124 | TILE_OPC_NONE, CHILD(1076), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11125 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11126 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11127 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11128 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11129 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11130 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11131 | BITFIELD(18, 4) /* index 513 */, | ||
11132 | TILE_OPC_NONE, TILE_OPC_ADDB, TILE_OPC_ADDH, TILE_OPC_ADD, | ||
11133 | TILE_OPC_ADIFFB_U, TILE_OPC_ADIFFH, TILE_OPC_AND, TILE_OPC_AVGB_U, | ||
11134 | TILE_OPC_AVGH, TILE_OPC_CRC32_32, TILE_OPC_CRC32_8, TILE_OPC_INTHB, | ||
11135 | TILE_OPC_INTHH, TILE_OPC_INTLB, TILE_OPC_INTLH, TILE_OPC_MAXB_U, | ||
11136 | BITFIELD(18, 4) /* index 530 */, | ||
11137 | TILE_OPC_MAXH, TILE_OPC_MINB_U, TILE_OPC_MINH, TILE_OPC_MNZB, TILE_OPC_MNZH, | ||
11138 | TILE_OPC_MNZ, TILE_OPC_MULHHA_SS, TILE_OPC_MULHHA_SU, TILE_OPC_MULHHA_UU, | ||
11139 | TILE_OPC_MULHHSA_UU, TILE_OPC_MULHH_SS, TILE_OPC_MULHH_SU, | ||
11140 | TILE_OPC_MULHH_UU, TILE_OPC_MULHLA_SS, TILE_OPC_MULHLA_SU, | ||
11141 | TILE_OPC_MULHLA_US, | ||
11142 | BITFIELD(18, 4) /* index 547 */, | ||
11143 | TILE_OPC_MULHLA_UU, TILE_OPC_MULHLSA_UU, TILE_OPC_MULHL_SS, | ||
11144 | TILE_OPC_MULHL_SU, TILE_OPC_MULHL_US, TILE_OPC_MULHL_UU, TILE_OPC_MULLLA_SS, | ||
11145 | TILE_OPC_MULLLA_SU, TILE_OPC_MULLLA_UU, TILE_OPC_MULLLSA_UU, | ||
11146 | TILE_OPC_MULLL_SS, TILE_OPC_MULLL_SU, TILE_OPC_MULLL_UU, TILE_OPC_MVNZ, | ||
11147 | TILE_OPC_MVZ, TILE_OPC_MZB, | ||
11148 | BITFIELD(18, 4) /* index 564 */, | ||
11149 | TILE_OPC_MZH, TILE_OPC_MZ, TILE_OPC_NOR, CHILD(581), TILE_OPC_PACKHB, | ||
11150 | TILE_OPC_PACKLB, TILE_OPC_RL, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_S3A, | ||
11151 | TILE_OPC_SADAB_U, TILE_OPC_SADAH, TILE_OPC_SADAH_U, TILE_OPC_SADB_U, | ||
11152 | TILE_OPC_SADH, TILE_OPC_SADH_U, | ||
11153 | BITFIELD(12, 2) /* index 581 */, | ||
11154 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(586), | ||
11155 | BITFIELD(14, 2) /* index 586 */, | ||
11156 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(591), | ||
11157 | BITFIELD(16, 2) /* index 591 */, | ||
11158 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
11159 | BITFIELD(18, 4) /* index 596 */, | ||
11160 | TILE_OPC_SEQB, TILE_OPC_SEQH, TILE_OPC_SEQ, TILE_OPC_SHLB, TILE_OPC_SHLH, | ||
11161 | TILE_OPC_SHL, TILE_OPC_SHRB, TILE_OPC_SHRH, TILE_OPC_SHR, TILE_OPC_SLTB, | ||
11162 | TILE_OPC_SLTB_U, TILE_OPC_SLTEB, TILE_OPC_SLTEB_U, TILE_OPC_SLTEH, | ||
11163 | TILE_OPC_SLTEH_U, TILE_OPC_SLTE, | ||
11164 | BITFIELD(18, 4) /* index 613 */, | ||
11165 | TILE_OPC_SLTE_U, TILE_OPC_SLTH, TILE_OPC_SLTH_U, TILE_OPC_SLT, | ||
11166 | TILE_OPC_SLT_U, TILE_OPC_SNEB, TILE_OPC_SNEH, TILE_OPC_SNE, TILE_OPC_SRAB, | ||
11167 | TILE_OPC_SRAH, TILE_OPC_SRA, TILE_OPC_SUBB, TILE_OPC_SUBH, TILE_OPC_SUB, | ||
11168 | TILE_OPC_XOR, TILE_OPC_DWORD_ALIGN, | ||
11169 | BITFIELD(18, 3) /* index 630 */, | ||
11170 | CHILD(639), CHILD(642), CHILD(645), CHILD(648), CHILD(651), CHILD(654), | ||
11171 | CHILD(657), CHILD(660), | ||
11172 | BITFIELD(21, 1) /* index 639 */, | ||
11173 | TILE_OPC_ADDS, TILE_OPC_NONE, | ||
11174 | BITFIELD(21, 1) /* index 642 */, | ||
11175 | TILE_OPC_SUBS, TILE_OPC_NONE, | ||
11176 | BITFIELD(21, 1) /* index 645 */, | ||
11177 | TILE_OPC_ADDBS_U, TILE_OPC_NONE, | ||
11178 | BITFIELD(21, 1) /* index 648 */, | ||
11179 | TILE_OPC_ADDHS, TILE_OPC_NONE, | ||
11180 | BITFIELD(21, 1) /* index 651 */, | ||
11181 | TILE_OPC_SUBBS_U, TILE_OPC_NONE, | ||
11182 | BITFIELD(21, 1) /* index 654 */, | ||
11183 | TILE_OPC_SUBHS, TILE_OPC_NONE, | ||
11184 | BITFIELD(21, 1) /* index 657 */, | ||
11185 | TILE_OPC_PACKHS, TILE_OPC_NONE, | ||
11186 | BITFIELD(21, 1) /* index 660 */, | ||
11187 | TILE_OPC_PACKBS_U, TILE_OPC_NONE, | ||
11188 | BITFIELD(18, 4) /* index 663 */, | ||
11189 | TILE_OPC_NONE, TILE_OPC_ADDB_SN, TILE_OPC_ADDH_SN, TILE_OPC_ADD_SN, | ||
11190 | TILE_OPC_ADIFFB_U_SN, TILE_OPC_ADIFFH_SN, TILE_OPC_AND_SN, | ||
11191 | TILE_OPC_AVGB_U_SN, TILE_OPC_AVGH_SN, TILE_OPC_CRC32_32_SN, | ||
11192 | TILE_OPC_CRC32_8_SN, TILE_OPC_INTHB_SN, TILE_OPC_INTHH_SN, | ||
11193 | TILE_OPC_INTLB_SN, TILE_OPC_INTLH_SN, TILE_OPC_MAXB_U_SN, | ||
11194 | BITFIELD(18, 4) /* index 680 */, | ||
11195 | TILE_OPC_MAXH_SN, TILE_OPC_MINB_U_SN, TILE_OPC_MINH_SN, TILE_OPC_MNZB_SN, | ||
11196 | TILE_OPC_MNZH_SN, TILE_OPC_MNZ_SN, TILE_OPC_MULHHA_SS_SN, | ||
11197 | TILE_OPC_MULHHA_SU_SN, TILE_OPC_MULHHA_UU_SN, TILE_OPC_MULHHSA_UU_SN, | ||
11198 | TILE_OPC_MULHH_SS_SN, TILE_OPC_MULHH_SU_SN, TILE_OPC_MULHH_UU_SN, | ||
11199 | TILE_OPC_MULHLA_SS_SN, TILE_OPC_MULHLA_SU_SN, TILE_OPC_MULHLA_US_SN, | ||
11200 | BITFIELD(18, 4) /* index 697 */, | ||
11201 | TILE_OPC_MULHLA_UU_SN, TILE_OPC_MULHLSA_UU_SN, TILE_OPC_MULHL_SS_SN, | ||
11202 | TILE_OPC_MULHL_SU_SN, TILE_OPC_MULHL_US_SN, TILE_OPC_MULHL_UU_SN, | ||
11203 | TILE_OPC_MULLLA_SS_SN, TILE_OPC_MULLLA_SU_SN, TILE_OPC_MULLLA_UU_SN, | ||
11204 | TILE_OPC_MULLLSA_UU_SN, TILE_OPC_MULLL_SS_SN, TILE_OPC_MULLL_SU_SN, | ||
11205 | TILE_OPC_MULLL_UU_SN, TILE_OPC_MVNZ_SN, TILE_OPC_MVZ_SN, TILE_OPC_MZB_SN, | ||
11206 | BITFIELD(18, 4) /* index 714 */, | ||
11207 | TILE_OPC_MZH_SN, TILE_OPC_MZ_SN, TILE_OPC_NOR_SN, CHILD(731), | ||
11208 | TILE_OPC_PACKHB_SN, TILE_OPC_PACKLB_SN, TILE_OPC_RL_SN, TILE_OPC_S1A_SN, | ||
11209 | TILE_OPC_S2A_SN, TILE_OPC_S3A_SN, TILE_OPC_SADAB_U_SN, TILE_OPC_SADAH_SN, | ||
11210 | TILE_OPC_SADAH_U_SN, TILE_OPC_SADB_U_SN, TILE_OPC_SADH_SN, | ||
11211 | TILE_OPC_SADH_U_SN, | ||
11212 | BITFIELD(12, 2) /* index 731 */, | ||
11213 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(736), | ||
11214 | BITFIELD(14, 2) /* index 736 */, | ||
11215 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(741), | ||
11216 | BITFIELD(16, 2) /* index 741 */, | ||
11217 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_MOVE_SN, | ||
11218 | BITFIELD(18, 4) /* index 746 */, | ||
11219 | TILE_OPC_SEQB_SN, TILE_OPC_SEQH_SN, TILE_OPC_SEQ_SN, TILE_OPC_SHLB_SN, | ||
11220 | TILE_OPC_SHLH_SN, TILE_OPC_SHL_SN, TILE_OPC_SHRB_SN, TILE_OPC_SHRH_SN, | ||
11221 | TILE_OPC_SHR_SN, TILE_OPC_SLTB_SN, TILE_OPC_SLTB_U_SN, TILE_OPC_SLTEB_SN, | ||
11222 | TILE_OPC_SLTEB_U_SN, TILE_OPC_SLTEH_SN, TILE_OPC_SLTEH_U_SN, | ||
11223 | TILE_OPC_SLTE_SN, | ||
11224 | BITFIELD(18, 4) /* index 763 */, | ||
11225 | TILE_OPC_SLTE_U_SN, TILE_OPC_SLTH_SN, TILE_OPC_SLTH_U_SN, TILE_OPC_SLT_SN, | ||
11226 | TILE_OPC_SLT_U_SN, TILE_OPC_SNEB_SN, TILE_OPC_SNEH_SN, TILE_OPC_SNE_SN, | ||
11227 | TILE_OPC_SRAB_SN, TILE_OPC_SRAH_SN, TILE_OPC_SRA_SN, TILE_OPC_SUBB_SN, | ||
11228 | TILE_OPC_SUBH_SN, TILE_OPC_SUB_SN, TILE_OPC_XOR_SN, TILE_OPC_DWORD_ALIGN_SN, | ||
11229 | BITFIELD(18, 3) /* index 780 */, | ||
11230 | CHILD(789), CHILD(792), CHILD(795), CHILD(798), CHILD(801), CHILD(804), | ||
11231 | CHILD(807), CHILD(810), | ||
11232 | BITFIELD(21, 1) /* index 789 */, | ||
11233 | TILE_OPC_ADDS_SN, TILE_OPC_NONE, | ||
11234 | BITFIELD(21, 1) /* index 792 */, | ||
11235 | TILE_OPC_SUBS_SN, TILE_OPC_NONE, | ||
11236 | BITFIELD(21, 1) /* index 795 */, | ||
11237 | TILE_OPC_ADDBS_U_SN, TILE_OPC_NONE, | ||
11238 | BITFIELD(21, 1) /* index 798 */, | ||
11239 | TILE_OPC_ADDHS_SN, TILE_OPC_NONE, | ||
11240 | BITFIELD(21, 1) /* index 801 */, | ||
11241 | TILE_OPC_SUBBS_U_SN, TILE_OPC_NONE, | ||
11242 | BITFIELD(21, 1) /* index 804 */, | ||
11243 | TILE_OPC_SUBHS_SN, TILE_OPC_NONE, | ||
11244 | BITFIELD(21, 1) /* index 807 */, | ||
11245 | TILE_OPC_PACKHS_SN, TILE_OPC_NONE, | ||
11246 | BITFIELD(21, 1) /* index 810 */, | ||
11247 | TILE_OPC_PACKBS_U_SN, TILE_OPC_NONE, | ||
11248 | BITFIELD(6, 2) /* index 813 */, | ||
11249 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(818), | ||
11250 | BITFIELD(8, 2) /* index 818 */, | ||
11251 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(823), | ||
11252 | BITFIELD(10, 2) /* index 823 */, | ||
11253 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_MOVELI_SN, | ||
11254 | BITFIELD(6, 2) /* index 828 */, | ||
11255 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(833), | ||
11256 | BITFIELD(8, 2) /* index 833 */, | ||
11257 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(838), | ||
11258 | BITFIELD(10, 2) /* index 838 */, | ||
11259 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_MOVELI, | ||
11260 | BITFIELD(0, 2) /* index 843 */, | ||
11261 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(848), | ||
11262 | BITFIELD(2, 2) /* index 848 */, | ||
11263 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(853), | ||
11264 | BITFIELD(4, 2) /* index 853 */, | ||
11265 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(858), | ||
11266 | BITFIELD(6, 2) /* index 858 */, | ||
11267 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(863), | ||
11268 | BITFIELD(8, 2) /* index 863 */, | ||
11269 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(868), | ||
11270 | BITFIELD(10, 2) /* index 868 */, | ||
11271 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_INFOL, | ||
11272 | BITFIELD(20, 2) /* index 873 */, | ||
11273 | TILE_OPC_NONE, TILE_OPC_ADDIB, TILE_OPC_ADDIH, TILE_OPC_ADDI, | ||
11274 | BITFIELD(20, 2) /* index 878 */, | ||
11275 | TILE_OPC_MAXIB_U, TILE_OPC_MAXIH, TILE_OPC_MINIB_U, TILE_OPC_MINIH, | ||
11276 | BITFIELD(20, 2) /* index 883 */, | ||
11277 | CHILD(888), TILE_OPC_SEQIB, TILE_OPC_SEQIH, TILE_OPC_SEQI, | ||
11278 | BITFIELD(6, 2) /* index 888 */, | ||
11279 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(893), | ||
11280 | BITFIELD(8, 2) /* index 893 */, | ||
11281 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(898), | ||
11282 | BITFIELD(10, 2) /* index 898 */, | ||
11283 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
11284 | BITFIELD(20, 2) /* index 903 */, | ||
11285 | TILE_OPC_SLTIB, TILE_OPC_SLTIB_U, TILE_OPC_SLTIH, TILE_OPC_SLTIH_U, | ||
11286 | BITFIELD(20, 2) /* index 908 */, | ||
11287 | TILE_OPC_SLTI, TILE_OPC_SLTI_U, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11288 | BITFIELD(20, 2) /* index 913 */, | ||
11289 | TILE_OPC_NONE, TILE_OPC_ADDIB_SN, TILE_OPC_ADDIH_SN, TILE_OPC_ADDI_SN, | ||
11290 | BITFIELD(20, 2) /* index 918 */, | ||
11291 | TILE_OPC_MAXIB_U_SN, TILE_OPC_MAXIH_SN, TILE_OPC_MINIB_U_SN, | ||
11292 | TILE_OPC_MINIH_SN, | ||
11293 | BITFIELD(20, 2) /* index 923 */, | ||
11294 | CHILD(928), TILE_OPC_SEQIB_SN, TILE_OPC_SEQIH_SN, TILE_OPC_SEQI_SN, | ||
11295 | BITFIELD(6, 2) /* index 928 */, | ||
11296 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(933), | ||
11297 | BITFIELD(8, 2) /* index 933 */, | ||
11298 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(938), | ||
11299 | BITFIELD(10, 2) /* index 938 */, | ||
11300 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_MOVEI_SN, | ||
11301 | BITFIELD(20, 2) /* index 943 */, | ||
11302 | TILE_OPC_SLTIB_SN, TILE_OPC_SLTIB_U_SN, TILE_OPC_SLTIH_SN, | ||
11303 | TILE_OPC_SLTIH_U_SN, | ||
11304 | BITFIELD(20, 2) /* index 948 */, | ||
11305 | TILE_OPC_SLTI_SN, TILE_OPC_SLTI_U_SN, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11306 | BITFIELD(20, 2) /* index 953 */, | ||
11307 | TILE_OPC_NONE, CHILD(958), TILE_OPC_XORI, TILE_OPC_NONE, | ||
11308 | BITFIELD(0, 2) /* index 958 */, | ||
11309 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(963), | ||
11310 | BITFIELD(2, 2) /* index 963 */, | ||
11311 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(968), | ||
11312 | BITFIELD(4, 2) /* index 968 */, | ||
11313 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(973), | ||
11314 | BITFIELD(6, 2) /* index 973 */, | ||
11315 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(978), | ||
11316 | BITFIELD(8, 2) /* index 978 */, | ||
11317 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(983), | ||
11318 | BITFIELD(10, 2) /* index 983 */, | ||
11319 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
11320 | BITFIELD(20, 2) /* index 988 */, | ||
11321 | TILE_OPC_NONE, TILE_OPC_ANDI_SN, TILE_OPC_XORI_SN, TILE_OPC_NONE, | ||
11322 | BITFIELD(17, 5) /* index 993 */, | ||
11323 | TILE_OPC_NONE, TILE_OPC_RLI, TILE_OPC_SHLIB, TILE_OPC_SHLIH, TILE_OPC_SHLI, | ||
11324 | TILE_OPC_SHRIB, TILE_OPC_SHRIH, TILE_OPC_SHRI, TILE_OPC_SRAIB, | ||
11325 | TILE_OPC_SRAIH, TILE_OPC_SRAI, CHILD(1026), TILE_OPC_NONE, TILE_OPC_NONE, | ||
11326 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11327 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11328 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11329 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11330 | BITFIELD(12, 4) /* index 1026 */, | ||
11331 | TILE_OPC_NONE, CHILD(1043), CHILD(1046), CHILD(1049), CHILD(1052), | ||
11332 | CHILD(1055), CHILD(1058), CHILD(1061), CHILD(1064), CHILD(1067), | ||
11333 | CHILD(1070), CHILD(1073), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11334 | TILE_OPC_NONE, | ||
11335 | BITFIELD(16, 1) /* index 1043 */, | ||
11336 | TILE_OPC_BITX, TILE_OPC_NONE, | ||
11337 | BITFIELD(16, 1) /* index 1046 */, | ||
11338 | TILE_OPC_BYTEX, TILE_OPC_NONE, | ||
11339 | BITFIELD(16, 1) /* index 1049 */, | ||
11340 | TILE_OPC_CLZ, TILE_OPC_NONE, | ||
11341 | BITFIELD(16, 1) /* index 1052 */, | ||
11342 | TILE_OPC_CTZ, TILE_OPC_NONE, | ||
11343 | BITFIELD(16, 1) /* index 1055 */, | ||
11344 | TILE_OPC_FNOP, TILE_OPC_NONE, | ||
11345 | BITFIELD(16, 1) /* index 1058 */, | ||
11346 | TILE_OPC_NOP, TILE_OPC_NONE, | ||
11347 | BITFIELD(16, 1) /* index 1061 */, | ||
11348 | TILE_OPC_PCNT, TILE_OPC_NONE, | ||
11349 | BITFIELD(16, 1) /* index 1064 */, | ||
11350 | TILE_OPC_TBLIDXB0, TILE_OPC_NONE, | ||
11351 | BITFIELD(16, 1) /* index 1067 */, | ||
11352 | TILE_OPC_TBLIDXB1, TILE_OPC_NONE, | ||
11353 | BITFIELD(16, 1) /* index 1070 */, | ||
11354 | TILE_OPC_TBLIDXB2, TILE_OPC_NONE, | ||
11355 | BITFIELD(16, 1) /* index 1073 */, | ||
11356 | TILE_OPC_TBLIDXB3, TILE_OPC_NONE, | ||
11357 | BITFIELD(17, 5) /* index 1076 */, | ||
11358 | TILE_OPC_NONE, TILE_OPC_RLI_SN, TILE_OPC_SHLIB_SN, TILE_OPC_SHLIH_SN, | ||
11359 | TILE_OPC_SHLI_SN, TILE_OPC_SHRIB_SN, TILE_OPC_SHRIH_SN, TILE_OPC_SHRI_SN, | ||
11360 | TILE_OPC_SRAIB_SN, TILE_OPC_SRAIH_SN, TILE_OPC_SRAI_SN, CHILD(1109), | ||
11361 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11362 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11363 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11364 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11365 | BITFIELD(12, 4) /* index 1109 */, | ||
11366 | TILE_OPC_NONE, CHILD(1126), CHILD(1129), CHILD(1132), CHILD(1135), | ||
11367 | CHILD(1055), CHILD(1058), CHILD(1138), CHILD(1141), CHILD(1144), | ||
11368 | CHILD(1147), CHILD(1150), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11369 | TILE_OPC_NONE, | ||
11370 | BITFIELD(16, 1) /* index 1126 */, | ||
11371 | TILE_OPC_BITX_SN, TILE_OPC_NONE, | ||
11372 | BITFIELD(16, 1) /* index 1129 */, | ||
11373 | TILE_OPC_BYTEX_SN, TILE_OPC_NONE, | ||
11374 | BITFIELD(16, 1) /* index 1132 */, | ||
11375 | TILE_OPC_CLZ_SN, TILE_OPC_NONE, | ||
11376 | BITFIELD(16, 1) /* index 1135 */, | ||
11377 | TILE_OPC_CTZ_SN, TILE_OPC_NONE, | ||
11378 | BITFIELD(16, 1) /* index 1138 */, | ||
11379 | TILE_OPC_PCNT_SN, TILE_OPC_NONE, | ||
11380 | BITFIELD(16, 1) /* index 1141 */, | ||
11381 | TILE_OPC_TBLIDXB0_SN, TILE_OPC_NONE, | ||
11382 | BITFIELD(16, 1) /* index 1144 */, | ||
11383 | TILE_OPC_TBLIDXB1_SN, TILE_OPC_NONE, | ||
11384 | BITFIELD(16, 1) /* index 1147 */, | ||
11385 | TILE_OPC_TBLIDXB2_SN, TILE_OPC_NONE, | ||
11386 | BITFIELD(16, 1) /* index 1150 */, | ||
11387 | TILE_OPC_TBLIDXB3_SN, TILE_OPC_NONE, | ||
11388 | }; | ||
11389 | |||
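The decode table above is walked using the BITFIELD()/CHILD() macros defined earlier: each node starts with a BITFIELD(start, size) header whose low 6 bits give a bundle bit offset and whose upper bits hold a (1 << size) - 1 selector mask, followed by 2^size entries that are either opcode numbers (at most TILE_OPC_NONE) or CHILD() links, biased by TILE_OPC_NONE, back into the same array. A minimal sketch of such a walk, assuming that encoding (find_opcode_in_fsm is a hypothetical name; the file's real lookup routine is not shown in this hunk):

/* Hypothetical walk of a decode FSM such as decode_X0_fsm[], assuming the
 * BITFIELD()/CHILD() encoding above.  Values greater than TILE_OPC_NONE are
 * CHILD() links; anything else indexes the tile_opcodes[] table directly. */
static const struct tile_opcode *
find_opcode_in_fsm(const unsigned short *fsm, unsigned long long bundle)
{
	int index = 0;

	for (;;) {
		unsigned short bitspec = fsm[index];
		unsigned int field = (unsigned int)(bundle >> (bitspec & 63)) &
				     (bitspec >> 6);
		unsigned short next = fsm[index + 1 + field];

		if (next <= TILE_OPC_NONE)
			return &tile_opcodes[next];	/* terminal: an opcode */
		index = next - TILE_OPC_NONE;		/* follow a CHILD() link */
	}
}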
11390 | static const unsigned short decode_X1_fsm[1509] = | ||
11391 | { | ||
11392 | BITFIELD(54, 9) /* index 0 */, | ||
11393 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11394 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11395 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11396 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11397 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11398 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11399 | TILE_OPC_NONE, TILE_OPC_NONE, CHILD(513), CHILD(561), CHILD(594), | ||
11400 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11401 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11402 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(641), CHILD(689), | ||
11403 | CHILD(722), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11404 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11405 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(766), | ||
11406 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
11407 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
11408 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
11409 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
11410 | CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), CHILD(766), | ||
11411 | CHILD(766), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
11412 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
11413 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
11414 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
11415 | CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), CHILD(781), | ||
11416 | CHILD(781), CHILD(781), CHILD(781), CHILD(796), CHILD(796), CHILD(796), | ||
11417 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
11418 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
11419 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
11420 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), | ||
11421 | CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(796), CHILD(826), | ||
11422 | CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), | ||
11423 | CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), CHILD(826), | ||
11424 | CHILD(826), CHILD(826), CHILD(826), CHILD(843), CHILD(843), CHILD(843), | ||
11425 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11426 | CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), CHILD(843), | ||
11427 | CHILD(843), CHILD(860), CHILD(899), CHILD(923), CHILD(932), TILE_OPC_NONE, | ||
11428 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11429 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11430 | TILE_OPC_NONE, CHILD(941), CHILD(950), CHILD(974), CHILD(983), | ||
11431 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11432 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11433 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11434 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11435 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11436 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11437 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11438 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, | ||
11439 | TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, TILE_OPC_MM, CHILD(992), | ||
11440 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11441 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11442 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11443 | CHILD(1303), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11444 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11445 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11446 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11447 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11448 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11449 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11450 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11451 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11452 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_J, TILE_OPC_J, | ||
11453 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
11454 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
11455 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
11456 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
11457 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
11458 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
11459 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
11460 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
11461 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
11462 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, TILE_OPC_J, | ||
11463 | TILE_OPC_J, TILE_OPC_J, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11464 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11465 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11466 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11467 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11468 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11469 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11470 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11471 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11472 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11473 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11474 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11475 | TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, TILE_OPC_JAL, | ||
11476 | TILE_OPC_JAL, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11477 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11478 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11479 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11480 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11481 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11482 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11483 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11484 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11485 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11486 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11487 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11488 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11489 | BITFIELD(49, 5) /* index 513 */, | ||
11490 | TILE_OPC_NONE, TILE_OPC_ADDB, TILE_OPC_ADDH, TILE_OPC_ADD, TILE_OPC_AND, | ||
11491 | TILE_OPC_INTHB, TILE_OPC_INTHH, TILE_OPC_INTLB, TILE_OPC_INTLH, | ||
11492 | TILE_OPC_JALRP, TILE_OPC_JALR, TILE_OPC_JRP, TILE_OPC_JR, TILE_OPC_LNK, | ||
11493 | TILE_OPC_MAXB_U, TILE_OPC_MAXH, TILE_OPC_MINB_U, TILE_OPC_MINH, | ||
11494 | TILE_OPC_MNZB, TILE_OPC_MNZH, TILE_OPC_MNZ, TILE_OPC_MZB, TILE_OPC_MZH, | ||
11495 | TILE_OPC_MZ, TILE_OPC_NOR, CHILD(546), TILE_OPC_PACKHB, TILE_OPC_PACKLB, | ||
11496 | TILE_OPC_RL, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_S3A, | ||
11497 | BITFIELD(43, 2) /* index 546 */, | ||
11498 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(551), | ||
11499 | BITFIELD(45, 2) /* index 551 */, | ||
11500 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(556), | ||
11501 | BITFIELD(47, 2) /* index 556 */, | ||
11502 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
11503 | BITFIELD(49, 5) /* index 561 */, | ||
11504 | TILE_OPC_SB, TILE_OPC_SEQB, TILE_OPC_SEQH, TILE_OPC_SEQ, TILE_OPC_SHLB, | ||
11505 | TILE_OPC_SHLH, TILE_OPC_SHL, TILE_OPC_SHRB, TILE_OPC_SHRH, TILE_OPC_SHR, | ||
11506 | TILE_OPC_SH, TILE_OPC_SLTB, TILE_OPC_SLTB_U, TILE_OPC_SLTEB, | ||
11507 | TILE_OPC_SLTEB_U, TILE_OPC_SLTEH, TILE_OPC_SLTEH_U, TILE_OPC_SLTE, | ||
11508 | TILE_OPC_SLTE_U, TILE_OPC_SLTH, TILE_OPC_SLTH_U, TILE_OPC_SLT, | ||
11509 | TILE_OPC_SLT_U, TILE_OPC_SNEB, TILE_OPC_SNEH, TILE_OPC_SNE, TILE_OPC_SRAB, | ||
11510 | TILE_OPC_SRAH, TILE_OPC_SRA, TILE_OPC_SUBB, TILE_OPC_SUBH, TILE_OPC_SUB, | ||
11511 | BITFIELD(49, 4) /* index 594 */, | ||
11512 | CHILD(611), CHILD(614), CHILD(617), CHILD(620), CHILD(623), CHILD(626), | ||
11513 | CHILD(629), CHILD(632), CHILD(635), CHILD(638), TILE_OPC_NONE, | ||
11514 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11515 | BITFIELD(53, 1) /* index 611 */, | ||
11516 | TILE_OPC_SW, TILE_OPC_NONE, | ||
11517 | BITFIELD(53, 1) /* index 614 */, | ||
11518 | TILE_OPC_XOR, TILE_OPC_NONE, | ||
11519 | BITFIELD(53, 1) /* index 617 */, | ||
11520 | TILE_OPC_ADDS, TILE_OPC_NONE, | ||
11521 | BITFIELD(53, 1) /* index 620 */, | ||
11522 | TILE_OPC_SUBS, TILE_OPC_NONE, | ||
11523 | BITFIELD(53, 1) /* index 623 */, | ||
11524 | TILE_OPC_ADDBS_U, TILE_OPC_NONE, | ||
11525 | BITFIELD(53, 1) /* index 626 */, | ||
11526 | TILE_OPC_ADDHS, TILE_OPC_NONE, | ||
11527 | BITFIELD(53, 1) /* index 629 */, | ||
11528 | TILE_OPC_SUBBS_U, TILE_OPC_NONE, | ||
11529 | BITFIELD(53, 1) /* index 632 */, | ||
11530 | TILE_OPC_SUBHS, TILE_OPC_NONE, | ||
11531 | BITFIELD(53, 1) /* index 635 */, | ||
11532 | TILE_OPC_PACKHS, TILE_OPC_NONE, | ||
11533 | BITFIELD(53, 1) /* index 638 */, | ||
11534 | TILE_OPC_PACKBS_U, TILE_OPC_NONE, | ||
11535 | BITFIELD(49, 5) /* index 641 */, | ||
11536 | TILE_OPC_NONE, TILE_OPC_ADDB_SN, TILE_OPC_ADDH_SN, TILE_OPC_ADD_SN, | ||
11537 | TILE_OPC_AND_SN, TILE_OPC_INTHB_SN, TILE_OPC_INTHH_SN, TILE_OPC_INTLB_SN, | ||
11538 | TILE_OPC_INTLH_SN, TILE_OPC_JALRP, TILE_OPC_JALR, TILE_OPC_JRP, TILE_OPC_JR, | ||
11539 | TILE_OPC_LNK_SN, TILE_OPC_MAXB_U_SN, TILE_OPC_MAXH_SN, TILE_OPC_MINB_U_SN, | ||
11540 | TILE_OPC_MINH_SN, TILE_OPC_MNZB_SN, TILE_OPC_MNZH_SN, TILE_OPC_MNZ_SN, | ||
11541 | TILE_OPC_MZB_SN, TILE_OPC_MZH_SN, TILE_OPC_MZ_SN, TILE_OPC_NOR_SN, | ||
11542 | CHILD(674), TILE_OPC_PACKHB_SN, TILE_OPC_PACKLB_SN, TILE_OPC_RL_SN, | ||
11543 | TILE_OPC_S1A_SN, TILE_OPC_S2A_SN, TILE_OPC_S3A_SN, | ||
11544 | BITFIELD(43, 2) /* index 674 */, | ||
11545 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(679), | ||
11546 | BITFIELD(45, 2) /* index 679 */, | ||
11547 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, CHILD(684), | ||
11548 | BITFIELD(47, 2) /* index 684 */, | ||
11549 | TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_OR_SN, TILE_OPC_MOVE_SN, | ||
11550 | BITFIELD(49, 5) /* index 689 */, | ||
11551 | TILE_OPC_SB, TILE_OPC_SEQB_SN, TILE_OPC_SEQH_SN, TILE_OPC_SEQ_SN, | ||
11552 | TILE_OPC_SHLB_SN, TILE_OPC_SHLH_SN, TILE_OPC_SHL_SN, TILE_OPC_SHRB_SN, | ||
11553 | TILE_OPC_SHRH_SN, TILE_OPC_SHR_SN, TILE_OPC_SH, TILE_OPC_SLTB_SN, | ||
11554 | TILE_OPC_SLTB_U_SN, TILE_OPC_SLTEB_SN, TILE_OPC_SLTEB_U_SN, | ||
11555 | TILE_OPC_SLTEH_SN, TILE_OPC_SLTEH_U_SN, TILE_OPC_SLTE_SN, | ||
11556 | TILE_OPC_SLTE_U_SN, TILE_OPC_SLTH_SN, TILE_OPC_SLTH_U_SN, TILE_OPC_SLT_SN, | ||
11557 | TILE_OPC_SLT_U_SN, TILE_OPC_SNEB_SN, TILE_OPC_SNEH_SN, TILE_OPC_SNE_SN, | ||
11558 | TILE_OPC_SRAB_SN, TILE_OPC_SRAH_SN, TILE_OPC_SRA_SN, TILE_OPC_SUBB_SN, | ||
11559 | TILE_OPC_SUBH_SN, TILE_OPC_SUB_SN, | ||
11560 | BITFIELD(49, 4) /* index 722 */, | ||
11561 | CHILD(611), CHILD(739), CHILD(742), CHILD(745), CHILD(748), CHILD(751), | ||
11562 | CHILD(754), CHILD(757), CHILD(760), CHILD(763), TILE_OPC_NONE, | ||
11563 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11564 | BITFIELD(53, 1) /* index 739 */, | ||
11565 | TILE_OPC_XOR_SN, TILE_OPC_NONE, | ||
11566 | BITFIELD(53, 1) /* index 742 */, | ||
11567 | TILE_OPC_ADDS_SN, TILE_OPC_NONE, | ||
11568 | BITFIELD(53, 1) /* index 745 */, | ||
11569 | TILE_OPC_SUBS_SN, TILE_OPC_NONE, | ||
11570 | BITFIELD(53, 1) /* index 748 */, | ||
11571 | TILE_OPC_ADDBS_U_SN, TILE_OPC_NONE, | ||
11572 | BITFIELD(53, 1) /* index 751 */, | ||
11573 | TILE_OPC_ADDHS_SN, TILE_OPC_NONE, | ||
11574 | BITFIELD(53, 1) /* index 754 */, | ||
11575 | TILE_OPC_SUBBS_U_SN, TILE_OPC_NONE, | ||
11576 | BITFIELD(53, 1) /* index 757 */, | ||
11577 | TILE_OPC_SUBHS_SN, TILE_OPC_NONE, | ||
11578 | BITFIELD(53, 1) /* index 760 */, | ||
11579 | TILE_OPC_PACKHS_SN, TILE_OPC_NONE, | ||
11580 | BITFIELD(53, 1) /* index 763 */, | ||
11581 | TILE_OPC_PACKBS_U_SN, TILE_OPC_NONE, | ||
11582 | BITFIELD(37, 2) /* index 766 */, | ||
11583 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(771), | ||
11584 | BITFIELD(39, 2) /* index 771 */, | ||
11585 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, CHILD(776), | ||
11586 | BITFIELD(41, 2) /* index 776 */, | ||
11587 | TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_ADDLI_SN, TILE_OPC_MOVELI_SN, | ||
11588 | BITFIELD(37, 2) /* index 781 */, | ||
11589 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(786), | ||
11590 | BITFIELD(39, 2) /* index 786 */, | ||
11591 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, CHILD(791), | ||
11592 | BITFIELD(41, 2) /* index 791 */, | ||
11593 | TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_ADDLI, TILE_OPC_MOVELI, | ||
11594 | BITFIELD(31, 2) /* index 796 */, | ||
11595 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(801), | ||
11596 | BITFIELD(33, 2) /* index 801 */, | ||
11597 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(806), | ||
11598 | BITFIELD(35, 2) /* index 806 */, | ||
11599 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(811), | ||
11600 | BITFIELD(37, 2) /* index 811 */, | ||
11601 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(816), | ||
11602 | BITFIELD(39, 2) /* index 816 */, | ||
11603 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, CHILD(821), | ||
11604 | BITFIELD(41, 2) /* index 821 */, | ||
11605 | TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_AULI, TILE_OPC_INFOL, | ||
11606 | BITFIELD(31, 4) /* index 826 */, | ||
11607 | TILE_OPC_BZ, TILE_OPC_BZT, TILE_OPC_BNZ, TILE_OPC_BNZT, TILE_OPC_BGZ, | ||
11608 | TILE_OPC_BGZT, TILE_OPC_BGEZ, TILE_OPC_BGEZT, TILE_OPC_BLZ, TILE_OPC_BLZT, | ||
11609 | TILE_OPC_BLEZ, TILE_OPC_BLEZT, TILE_OPC_BBS, TILE_OPC_BBST, TILE_OPC_BBNS, | ||
11610 | TILE_OPC_BBNST, | ||
11611 | BITFIELD(31, 4) /* index 843 */, | ||
11612 | TILE_OPC_BZ_SN, TILE_OPC_BZT_SN, TILE_OPC_BNZ_SN, TILE_OPC_BNZT_SN, | ||
11613 | TILE_OPC_BGZ_SN, TILE_OPC_BGZT_SN, TILE_OPC_BGEZ_SN, TILE_OPC_BGEZT_SN, | ||
11614 | TILE_OPC_BLZ_SN, TILE_OPC_BLZT_SN, TILE_OPC_BLEZ_SN, TILE_OPC_BLEZT_SN, | ||
11615 | TILE_OPC_BBS_SN, TILE_OPC_BBST_SN, TILE_OPC_BBNS_SN, TILE_OPC_BBNST_SN, | ||
11616 | BITFIELD(51, 3) /* index 860 */, | ||
11617 | TILE_OPC_NONE, TILE_OPC_ADDIB, TILE_OPC_ADDIH, TILE_OPC_ADDI, CHILD(869), | ||
11618 | TILE_OPC_MAXIB_U, TILE_OPC_MAXIH, TILE_OPC_MFSPR, | ||
11619 | BITFIELD(31, 2) /* index 869 */, | ||
11620 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(874), | ||
11621 | BITFIELD(33, 2) /* index 874 */, | ||
11622 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(879), | ||
11623 | BITFIELD(35, 2) /* index 879 */, | ||
11624 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(884), | ||
11625 | BITFIELD(37, 2) /* index 884 */, | ||
11626 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(889), | ||
11627 | BITFIELD(39, 2) /* index 889 */, | ||
11628 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(894), | ||
11629 | BITFIELD(41, 2) /* index 894 */, | ||
11630 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
11631 | BITFIELD(51, 3) /* index 899 */, | ||
11632 | TILE_OPC_MINIB_U, TILE_OPC_MINIH, TILE_OPC_MTSPR, CHILD(908), | ||
11633 | TILE_OPC_SEQIB, TILE_OPC_SEQIH, TILE_OPC_SEQI, TILE_OPC_SLTIB, | ||
11634 | BITFIELD(37, 2) /* index 908 */, | ||
11635 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(913), | ||
11636 | BITFIELD(39, 2) /* index 913 */, | ||
11637 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(918), | ||
11638 | BITFIELD(41, 2) /* index 918 */, | ||
11639 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
11640 | BITFIELD(51, 3) /* index 923 */, | ||
11641 | TILE_OPC_SLTIB_U, TILE_OPC_SLTIH, TILE_OPC_SLTIH_U, TILE_OPC_SLTI, | ||
11642 | TILE_OPC_SLTI_U, TILE_OPC_XORI, TILE_OPC_LBADD, TILE_OPC_LBADD_U, | ||
11643 | BITFIELD(51, 3) /* index 932 */, | ||
11644 | TILE_OPC_LHADD, TILE_OPC_LHADD_U, TILE_OPC_LWADD, TILE_OPC_LWADD_NA, | ||
11645 | TILE_OPC_SBADD, TILE_OPC_SHADD, TILE_OPC_SWADD, TILE_OPC_NONE, | ||
11646 | BITFIELD(51, 3) /* index 941 */, | ||
11647 | TILE_OPC_NONE, TILE_OPC_ADDIB_SN, TILE_OPC_ADDIH_SN, TILE_OPC_ADDI_SN, | ||
11648 | TILE_OPC_ANDI_SN, TILE_OPC_MAXIB_U_SN, TILE_OPC_MAXIH_SN, TILE_OPC_MFSPR, | ||
11649 | BITFIELD(51, 3) /* index 950 */, | ||
11650 | TILE_OPC_MINIB_U_SN, TILE_OPC_MINIH_SN, TILE_OPC_MTSPR, CHILD(959), | ||
11651 | TILE_OPC_SEQIB_SN, TILE_OPC_SEQIH_SN, TILE_OPC_SEQI_SN, TILE_OPC_SLTIB_SN, | ||
11652 | BITFIELD(37, 2) /* index 959 */, | ||
11653 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(964), | ||
11654 | BITFIELD(39, 2) /* index 964 */, | ||
11655 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, CHILD(969), | ||
11656 | BITFIELD(41, 2) /* index 969 */, | ||
11657 | TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_ORI_SN, TILE_OPC_MOVEI_SN, | ||
11658 | BITFIELD(51, 3) /* index 974 */, | ||
11659 | TILE_OPC_SLTIB_U_SN, TILE_OPC_SLTIH_SN, TILE_OPC_SLTIH_U_SN, | ||
11660 | TILE_OPC_SLTI_SN, TILE_OPC_SLTI_U_SN, TILE_OPC_XORI_SN, TILE_OPC_LBADD_SN, | ||
11661 | TILE_OPC_LBADD_U_SN, | ||
11662 | BITFIELD(51, 3) /* index 983 */, | ||
11663 | TILE_OPC_LHADD_SN, TILE_OPC_LHADD_U_SN, TILE_OPC_LWADD_SN, | ||
11664 | TILE_OPC_LWADD_NA_SN, TILE_OPC_SBADD, TILE_OPC_SHADD, TILE_OPC_SWADD, | ||
11665 | TILE_OPC_NONE, | ||
11666 | BITFIELD(46, 7) /* index 992 */, | ||
11667 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(1121), | ||
11668 | CHILD(1121), CHILD(1121), CHILD(1121), CHILD(1124), CHILD(1124), | ||
11669 | CHILD(1124), CHILD(1124), CHILD(1127), CHILD(1127), CHILD(1127), | ||
11670 | CHILD(1127), CHILD(1130), CHILD(1130), CHILD(1130), CHILD(1130), | ||
11671 | CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1133), CHILD(1136), | ||
11672 | CHILD(1136), CHILD(1136), CHILD(1136), CHILD(1139), CHILD(1139), | ||
11673 | CHILD(1139), CHILD(1139), CHILD(1142), CHILD(1142), CHILD(1142), | ||
11674 | CHILD(1142), CHILD(1145), CHILD(1145), CHILD(1145), CHILD(1145), | ||
11675 | CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1148), CHILD(1151), | ||
11676 | CHILD(1211), CHILD(1259), CHILD(1292), TILE_OPC_NONE, TILE_OPC_NONE, | ||
11677 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11678 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11679 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11680 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11681 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11682 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11683 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11684 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11685 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11686 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11687 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11688 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11689 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11690 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11691 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11692 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11693 | BITFIELD(53, 1) /* index 1121 */, | ||
11694 | TILE_OPC_RLI, TILE_OPC_NONE, | ||
11695 | BITFIELD(53, 1) /* index 1124 */, | ||
11696 | TILE_OPC_SHLIB, TILE_OPC_NONE, | ||
11697 | BITFIELD(53, 1) /* index 1127 */, | ||
11698 | TILE_OPC_SHLIH, TILE_OPC_NONE, | ||
11699 | BITFIELD(53, 1) /* index 1130 */, | ||
11700 | TILE_OPC_SHLI, TILE_OPC_NONE, | ||
11701 | BITFIELD(53, 1) /* index 1133 */, | ||
11702 | TILE_OPC_SHRIB, TILE_OPC_NONE, | ||
11703 | BITFIELD(53, 1) /* index 1136 */, | ||
11704 | TILE_OPC_SHRIH, TILE_OPC_NONE, | ||
11705 | BITFIELD(53, 1) /* index 1139 */, | ||
11706 | TILE_OPC_SHRI, TILE_OPC_NONE, | ||
11707 | BITFIELD(53, 1) /* index 1142 */, | ||
11708 | TILE_OPC_SRAIB, TILE_OPC_NONE, | ||
11709 | BITFIELD(53, 1) /* index 1145 */, | ||
11710 | TILE_OPC_SRAIH, TILE_OPC_NONE, | ||
11711 | BITFIELD(53, 1) /* index 1148 */, | ||
11712 | TILE_OPC_SRAI, TILE_OPC_NONE, | ||
11713 | BITFIELD(43, 3) /* index 1151 */, | ||
11714 | TILE_OPC_NONE, CHILD(1160), CHILD(1163), CHILD(1166), CHILD(1169), | ||
11715 | CHILD(1172), CHILD(1175), CHILD(1178), | ||
11716 | BITFIELD(53, 1) /* index 1160 */, | ||
11717 | TILE_OPC_DRAIN, TILE_OPC_NONE, | ||
11718 | BITFIELD(53, 1) /* index 1163 */, | ||
11719 | TILE_OPC_DTLBPR, TILE_OPC_NONE, | ||
11720 | BITFIELD(53, 1) /* index 1166 */, | ||
11721 | TILE_OPC_FINV, TILE_OPC_NONE, | ||
11722 | BITFIELD(53, 1) /* index 1169 */, | ||
11723 | TILE_OPC_FLUSH, TILE_OPC_NONE, | ||
11724 | BITFIELD(53, 1) /* index 1172 */, | ||
11725 | TILE_OPC_FNOP, TILE_OPC_NONE, | ||
11726 | BITFIELD(53, 1) /* index 1175 */, | ||
11727 | TILE_OPC_ICOH, TILE_OPC_NONE, | ||
11728 | BITFIELD(53, 1) /* index 1178 */, | ||
11729 | CHILD(1181), TILE_OPC_NONE, | ||
11730 | BITFIELD(31, 2) /* index 1181 */, | ||
11731 | CHILD(1186), TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_ILL, | ||
11732 | BITFIELD(33, 2) /* index 1186 */, | ||
11733 | TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_ILL, CHILD(1191), | ||
11734 | BITFIELD(35, 2) /* index 1191 */, | ||
11735 | TILE_OPC_ILL, CHILD(1196), TILE_OPC_ILL, TILE_OPC_ILL, | ||
11736 | BITFIELD(37, 2) /* index 1196 */, | ||
11737 | TILE_OPC_ILL, CHILD(1201), TILE_OPC_ILL, TILE_OPC_ILL, | ||
11738 | BITFIELD(39, 2) /* index 1201 */, | ||
11739 | TILE_OPC_ILL, CHILD(1206), TILE_OPC_ILL, TILE_OPC_ILL, | ||
11740 | BITFIELD(41, 2) /* index 1206 */, | ||
11741 | TILE_OPC_ILL, TILE_OPC_ILL, TILE_OPC_BPT, TILE_OPC_ILL, | ||
11742 | BITFIELD(43, 3) /* index 1211 */, | ||
11743 | CHILD(1220), CHILD(1223), CHILD(1226), CHILD(1244), CHILD(1247), | ||
11744 | CHILD(1250), CHILD(1253), CHILD(1256), | ||
11745 | BITFIELD(53, 1) /* index 1220 */, | ||
11746 | TILE_OPC_INV, TILE_OPC_NONE, | ||
11747 | BITFIELD(53, 1) /* index 1223 */, | ||
11748 | TILE_OPC_IRET, TILE_OPC_NONE, | ||
11749 | BITFIELD(53, 1) /* index 1226 */, | ||
11750 | CHILD(1229), TILE_OPC_NONE, | ||
11751 | BITFIELD(31, 2) /* index 1229 */, | ||
11752 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(1234), | ||
11753 | BITFIELD(33, 2) /* index 1234 */, | ||
11754 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(1239), | ||
11755 | BITFIELD(35, 2) /* index 1239 */, | ||
11756 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_PREFETCH, | ||
11757 | BITFIELD(53, 1) /* index 1244 */, | ||
11758 | TILE_OPC_LB_U, TILE_OPC_NONE, | ||
11759 | BITFIELD(53, 1) /* index 1247 */, | ||
11760 | TILE_OPC_LH, TILE_OPC_NONE, | ||
11761 | BITFIELD(53, 1) /* index 1250 */, | ||
11762 | TILE_OPC_LH_U, TILE_OPC_NONE, | ||
11763 | BITFIELD(53, 1) /* index 1253 */, | ||
11764 | TILE_OPC_LW, TILE_OPC_NONE, | ||
11765 | BITFIELD(53, 1) /* index 1256 */, | ||
11766 | TILE_OPC_MF, TILE_OPC_NONE, | ||
11767 | BITFIELD(43, 3) /* index 1259 */, | ||
11768 | CHILD(1268), CHILD(1271), CHILD(1274), CHILD(1277), CHILD(1280), | ||
11769 | CHILD(1283), CHILD(1286), CHILD(1289), | ||
11770 | BITFIELD(53, 1) /* index 1268 */, | ||
11771 | TILE_OPC_NAP, TILE_OPC_NONE, | ||
11772 | BITFIELD(53, 1) /* index 1271 */, | ||
11773 | TILE_OPC_NOP, TILE_OPC_NONE, | ||
11774 | BITFIELD(53, 1) /* index 1274 */, | ||
11775 | TILE_OPC_SWINT0, TILE_OPC_NONE, | ||
11776 | BITFIELD(53, 1) /* index 1277 */, | ||
11777 | TILE_OPC_SWINT1, TILE_OPC_NONE, | ||
11778 | BITFIELD(53, 1) /* index 1280 */, | ||
11779 | TILE_OPC_SWINT2, TILE_OPC_NONE, | ||
11780 | BITFIELD(53, 1) /* index 1283 */, | ||
11781 | TILE_OPC_SWINT3, TILE_OPC_NONE, | ||
11782 | BITFIELD(53, 1) /* index 1286 */, | ||
11783 | TILE_OPC_TNS, TILE_OPC_NONE, | ||
11784 | BITFIELD(53, 1) /* index 1289 */, | ||
11785 | TILE_OPC_WH64, TILE_OPC_NONE, | ||
11786 | BITFIELD(43, 2) /* index 1292 */, | ||
11787 | CHILD(1297), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11788 | BITFIELD(45, 1) /* index 1297 */, | ||
11789 | CHILD(1300), TILE_OPC_NONE, | ||
11790 | BITFIELD(53, 1) /* index 1300 */, | ||
11791 | TILE_OPC_LW_NA, TILE_OPC_NONE, | ||
11792 | BITFIELD(46, 7) /* index 1303 */, | ||
11793 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, CHILD(1432), | ||
11794 | CHILD(1432), CHILD(1432), CHILD(1432), CHILD(1435), CHILD(1435), | ||
11795 | CHILD(1435), CHILD(1435), CHILD(1438), CHILD(1438), CHILD(1438), | ||
11796 | CHILD(1438), CHILD(1441), CHILD(1441), CHILD(1441), CHILD(1441), | ||
11797 | CHILD(1444), CHILD(1444), CHILD(1444), CHILD(1444), CHILD(1447), | ||
11798 | CHILD(1447), CHILD(1447), CHILD(1447), CHILD(1450), CHILD(1450), | ||
11799 | CHILD(1450), CHILD(1450), CHILD(1453), CHILD(1453), CHILD(1453), | ||
11800 | CHILD(1453), CHILD(1456), CHILD(1456), CHILD(1456), CHILD(1456), | ||
11801 | CHILD(1459), CHILD(1459), CHILD(1459), CHILD(1459), CHILD(1151), | ||
11802 | CHILD(1462), CHILD(1486), CHILD(1498), TILE_OPC_NONE, TILE_OPC_NONE, | ||
11803 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11804 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11805 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11806 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11807 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11808 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11809 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11810 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11811 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11812 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11813 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11814 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11815 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11816 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11817 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11818 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11819 | BITFIELD(53, 1) /* index 1432 */, | ||
11820 | TILE_OPC_RLI_SN, TILE_OPC_NONE, | ||
11821 | BITFIELD(53, 1) /* index 1435 */, | ||
11822 | TILE_OPC_SHLIB_SN, TILE_OPC_NONE, | ||
11823 | BITFIELD(53, 1) /* index 1438 */, | ||
11824 | TILE_OPC_SHLIH_SN, TILE_OPC_NONE, | ||
11825 | BITFIELD(53, 1) /* index 1441 */, | ||
11826 | TILE_OPC_SHLI_SN, TILE_OPC_NONE, | ||
11827 | BITFIELD(53, 1) /* index 1444 */, | ||
11828 | TILE_OPC_SHRIB_SN, TILE_OPC_NONE, | ||
11829 | BITFIELD(53, 1) /* index 1447 */, | ||
11830 | TILE_OPC_SHRIH_SN, TILE_OPC_NONE, | ||
11831 | BITFIELD(53, 1) /* index 1450 */, | ||
11832 | TILE_OPC_SHRI_SN, TILE_OPC_NONE, | ||
11833 | BITFIELD(53, 1) /* index 1453 */, | ||
11834 | TILE_OPC_SRAIB_SN, TILE_OPC_NONE, | ||
11835 | BITFIELD(53, 1) /* index 1456 */, | ||
11836 | TILE_OPC_SRAIH_SN, TILE_OPC_NONE, | ||
11837 | BITFIELD(53, 1) /* index 1459 */, | ||
11838 | TILE_OPC_SRAI_SN, TILE_OPC_NONE, | ||
11839 | BITFIELD(43, 3) /* index 1462 */, | ||
11840 | CHILD(1220), CHILD(1223), CHILD(1471), CHILD(1474), CHILD(1477), | ||
11841 | CHILD(1480), CHILD(1483), CHILD(1256), | ||
11842 | BITFIELD(53, 1) /* index 1471 */, | ||
11843 | TILE_OPC_LB_SN, TILE_OPC_NONE, | ||
11844 | BITFIELD(53, 1) /* index 1474 */, | ||
11845 | TILE_OPC_LB_U_SN, TILE_OPC_NONE, | ||
11846 | BITFIELD(53, 1) /* index 1477 */, | ||
11847 | TILE_OPC_LH_SN, TILE_OPC_NONE, | ||
11848 | BITFIELD(53, 1) /* index 1480 */, | ||
11849 | TILE_OPC_LH_U_SN, TILE_OPC_NONE, | ||
11850 | BITFIELD(53, 1) /* index 1483 */, | ||
11851 | TILE_OPC_LW_SN, TILE_OPC_NONE, | ||
11852 | BITFIELD(43, 3) /* index 1486 */, | ||
11853 | CHILD(1268), CHILD(1271), CHILD(1274), CHILD(1277), CHILD(1280), | ||
11854 | CHILD(1283), CHILD(1495), CHILD(1289), | ||
11855 | BITFIELD(53, 1) /* index 1495 */, | ||
11856 | TILE_OPC_TNS_SN, TILE_OPC_NONE, | ||
11857 | BITFIELD(43, 2) /* index 1498 */, | ||
11858 | CHILD(1503), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11859 | BITFIELD(45, 1) /* index 1503 */, | ||
11860 | CHILD(1506), TILE_OPC_NONE, | ||
11861 | BITFIELD(53, 1) /* index 1506 */, | ||
11862 | TILE_OPC_LW_NA_SN, TILE_OPC_NONE, | ||
11863 | }; | ||
11864 | |||
11865 | static const unsigned short decode_Y0_fsm[168] = | ||
11866 | { | ||
11867 | BITFIELD(27, 4) /* index 0 */, | ||
11868 | TILE_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52), | ||
11869 | CHILD(57), CHILD(62), CHILD(67), TILE_OPC_ADDI, CHILD(72), CHILD(102), | ||
11870 | TILE_OPC_SEQI, CHILD(117), TILE_OPC_SLTI, TILE_OPC_SLTI_U, | ||
11871 | BITFIELD(18, 2) /* index 17 */, | ||
11872 | TILE_OPC_ADD, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_SUB, | ||
11873 | BITFIELD(18, 2) /* index 22 */, | ||
11874 | TILE_OPC_MNZ, TILE_OPC_MVNZ, TILE_OPC_MVZ, TILE_OPC_MZ, | ||
11875 | BITFIELD(18, 2) /* index 27 */, | ||
11876 | TILE_OPC_AND, TILE_OPC_NOR, CHILD(32), TILE_OPC_XOR, | ||
11877 | BITFIELD(12, 2) /* index 32 */, | ||
11878 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(37), | ||
11879 | BITFIELD(14, 2) /* index 37 */, | ||
11880 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(42), | ||
11881 | BITFIELD(16, 2) /* index 42 */, | ||
11882 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
11883 | BITFIELD(18, 2) /* index 47 */, | ||
11884 | TILE_OPC_RL, TILE_OPC_SHL, TILE_OPC_SHR, TILE_OPC_SRA, | ||
11885 | BITFIELD(18, 2) /* index 52 */, | ||
11886 | TILE_OPC_SLTE, TILE_OPC_SLTE_U, TILE_OPC_SLT, TILE_OPC_SLT_U, | ||
11887 | BITFIELD(18, 2) /* index 57 */, | ||
11888 | TILE_OPC_MULHLSA_UU, TILE_OPC_S3A, TILE_OPC_SEQ, TILE_OPC_SNE, | ||
11889 | BITFIELD(18, 2) /* index 62 */, | ||
11890 | TILE_OPC_MULHH_SS, TILE_OPC_MULHH_UU, TILE_OPC_MULLL_SS, TILE_OPC_MULLL_UU, | ||
11891 | BITFIELD(18, 2) /* index 67 */, | ||
11892 | TILE_OPC_MULHHA_SS, TILE_OPC_MULHHA_UU, TILE_OPC_MULLLA_SS, | ||
11893 | TILE_OPC_MULLLA_UU, | ||
11894 | BITFIELD(0, 2) /* index 72 */, | ||
11895 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(77), | ||
11896 | BITFIELD(2, 2) /* index 77 */, | ||
11897 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(82), | ||
11898 | BITFIELD(4, 2) /* index 82 */, | ||
11899 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(87), | ||
11900 | BITFIELD(6, 2) /* index 87 */, | ||
11901 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(92), | ||
11902 | BITFIELD(8, 2) /* index 92 */, | ||
11903 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(97), | ||
11904 | BITFIELD(10, 2) /* index 97 */, | ||
11905 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
11906 | BITFIELD(6, 2) /* index 102 */, | ||
11907 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(107), | ||
11908 | BITFIELD(8, 2) /* index 107 */, | ||
11909 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(112), | ||
11910 | BITFIELD(10, 2) /* index 112 */, | ||
11911 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
11912 | BITFIELD(15, 5) /* index 117 */, | ||
11913 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_RLI, | ||
11914 | TILE_OPC_RLI, TILE_OPC_RLI, TILE_OPC_RLI, TILE_OPC_SHLI, TILE_OPC_SHLI, | ||
11915 | TILE_OPC_SHLI, TILE_OPC_SHLI, TILE_OPC_SHRI, TILE_OPC_SHRI, TILE_OPC_SHRI, | ||
11916 | TILE_OPC_SHRI, TILE_OPC_SRAI, TILE_OPC_SRAI, TILE_OPC_SRAI, TILE_OPC_SRAI, | ||
11917 | CHILD(150), CHILD(159), TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11918 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11919 | TILE_OPC_NONE, TILE_OPC_NONE, | ||
11920 | BITFIELD(12, 3) /* index 150 */, | ||
11921 | TILE_OPC_NONE, TILE_OPC_BITX, TILE_OPC_BYTEX, TILE_OPC_CLZ, TILE_OPC_CTZ, | ||
11922 | TILE_OPC_FNOP, TILE_OPC_NOP, TILE_OPC_PCNT, | ||
11923 | BITFIELD(12, 3) /* index 159 */, | ||
11924 | TILE_OPC_TBLIDXB0, TILE_OPC_TBLIDXB1, TILE_OPC_TBLIDXB2, TILE_OPC_TBLIDXB3, | ||
11925 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11926 | }; | ||
11927 | |||
11928 | static const unsigned short decode_Y1_fsm[140] = | ||
11929 | { | ||
11930 | BITFIELD(59, 4) /* index 0 */, | ||
11931 | TILE_OPC_NONE, CHILD(17), CHILD(22), CHILD(27), CHILD(47), CHILD(52), | ||
11932 | CHILD(57), TILE_OPC_ADDI, CHILD(62), CHILD(92), TILE_OPC_SEQI, CHILD(107), | ||
11933 | TILE_OPC_SLTI, TILE_OPC_SLTI_U, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11934 | BITFIELD(49, 2) /* index 17 */, | ||
11935 | TILE_OPC_ADD, TILE_OPC_S1A, TILE_OPC_S2A, TILE_OPC_SUB, | ||
11936 | BITFIELD(49, 2) /* index 22 */, | ||
11937 | TILE_OPC_NONE, TILE_OPC_MNZ, TILE_OPC_MZ, TILE_OPC_NONE, | ||
11938 | BITFIELD(49, 2) /* index 27 */, | ||
11939 | TILE_OPC_AND, TILE_OPC_NOR, CHILD(32), TILE_OPC_XOR, | ||
11940 | BITFIELD(43, 2) /* index 32 */, | ||
11941 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(37), | ||
11942 | BITFIELD(45, 2) /* index 37 */, | ||
11943 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, CHILD(42), | ||
11944 | BITFIELD(47, 2) /* index 42 */, | ||
11945 | TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_OR, TILE_OPC_MOVE, | ||
11946 | BITFIELD(49, 2) /* index 47 */, | ||
11947 | TILE_OPC_RL, TILE_OPC_SHL, TILE_OPC_SHR, TILE_OPC_SRA, | ||
11948 | BITFIELD(49, 2) /* index 52 */, | ||
11949 | TILE_OPC_SLTE, TILE_OPC_SLTE_U, TILE_OPC_SLT, TILE_OPC_SLT_U, | ||
11950 | BITFIELD(49, 2) /* index 57 */, | ||
11951 | TILE_OPC_NONE, TILE_OPC_S3A, TILE_OPC_SEQ, TILE_OPC_SNE, | ||
11952 | BITFIELD(31, 2) /* index 62 */, | ||
11953 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(67), | ||
11954 | BITFIELD(33, 2) /* index 67 */, | ||
11955 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(72), | ||
11956 | BITFIELD(35, 2) /* index 72 */, | ||
11957 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(77), | ||
11958 | BITFIELD(37, 2) /* index 77 */, | ||
11959 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(82), | ||
11960 | BITFIELD(39, 2) /* index 82 */, | ||
11961 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, CHILD(87), | ||
11962 | BITFIELD(41, 2) /* index 87 */, | ||
11963 | TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_ANDI, TILE_OPC_INFO, | ||
11964 | BITFIELD(37, 2) /* index 92 */, | ||
11965 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(97), | ||
11966 | BITFIELD(39, 2) /* index 97 */, | ||
11967 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, CHILD(102), | ||
11968 | BITFIELD(41, 2) /* index 102 */, | ||
11969 | TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_ORI, TILE_OPC_MOVEI, | ||
11970 | BITFIELD(48, 3) /* index 107 */, | ||
11971 | TILE_OPC_NONE, TILE_OPC_RLI, TILE_OPC_SHLI, TILE_OPC_SHRI, TILE_OPC_SRAI, | ||
11972 | CHILD(116), TILE_OPC_NONE, TILE_OPC_NONE, | ||
11973 | BITFIELD(43, 3) /* index 116 */, | ||
11974 | TILE_OPC_NONE, CHILD(125), CHILD(130), CHILD(135), TILE_OPC_NONE, | ||
11975 | TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11976 | BITFIELD(46, 2) /* index 125 */, | ||
11977 | TILE_OPC_FNOP, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11978 | BITFIELD(46, 2) /* index 130 */, | ||
11979 | TILE_OPC_ILL, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11980 | BITFIELD(46, 2) /* index 135 */, | ||
11981 | TILE_OPC_NOP, TILE_OPC_NONE, TILE_OPC_NONE, TILE_OPC_NONE, | ||
11982 | }; | ||
11983 | |||
11984 | static const unsigned short decode_Y2_fsm[24] = | ||
11985 | { | ||
11986 | BITFIELD(56, 3) /* index 0 */, | ||
11987 | CHILD(9), TILE_OPC_LB_U, TILE_OPC_LH, TILE_OPC_LH_U, TILE_OPC_LW, | ||
11988 | TILE_OPC_SB, TILE_OPC_SH, TILE_OPC_SW, | ||
11989 | BITFIELD(20, 2) /* index 9 */, | ||
11990 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(14), | ||
11991 | BITFIELD(22, 2) /* index 14 */, | ||
11992 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, CHILD(19), | ||
11993 | BITFIELD(24, 2) /* index 19 */, | ||
11994 | TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_LB, TILE_OPC_PREFETCH, | ||
11995 | }; | ||
11996 | |||
11997 | #undef BITFIELD | ||
11998 | #undef CHILD | ||
11999 | const unsigned short * const | ||
12000 | tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS] = | ||
12001 | { | ||
12002 | decode_X0_fsm, | ||
12003 | decode_X1_fsm, | ||
12004 | decode_Y0_fsm, | ||
12005 | decode_Y1_fsm, | ||
12006 | decode_Y2_fsm | ||
12007 | }; | ||
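/*
 * Illustrative sketch (not part of the generated tables): one way the
 * per-pipeline FSM arrays above can be walked to decode an instruction.
 * It assumes BITFIELD(start, size) packs the starting bit position in the
 * low six bits of the short and the field mask in the bits above, and that
 * CHILD(i) is encoded as TILE_OPC_NONE + i so child links can be told
 * apart from terminal opcodes; example_walk_decoder_fsm is a hypothetical
 * helper, not the parse routine this file actually provides.
 */
static inline unsigned short
example_walk_decoder_fsm(tile_bundle_bits bundle, int pipe)
{
	const unsigned short *table = tile_bundle_decoder_fsms[pipe];
	int index = 0;

	for (;;) {
		/* Each node begins with a BITFIELD(start, size) descriptor. */
		unsigned short bitspec = table[index];
		unsigned int start = bitspec & 63;
		unsigned int mask = bitspec >> 6;
		unsigned int field = (unsigned int)(bundle >> start) & mask;

		/* The selected entry is a terminal opcode or a CHILD link. */
		unsigned short next = table[index + 1 + field];

		if (next <= TILE_OPC_NONE)
			return next;		/* TILE_OPC_* value */
		index = next - TILE_OPC_NONE;	/* follow CHILD(index) */
	}
}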
12008 | const struct tile_sn_opcode tile_sn_opcodes[23] = | ||
12009 | { | ||
12010 | { "bz", TILE_SN_OPC_BZ, | ||
12011 | 1 /* num_operands */, | ||
12012 | /* operands */ | ||
12013 | { 38 }, | ||
12014 | /* fixed_bit_mask */ | ||
12015 | 0xfc00, | ||
12016 | /* fixed_bit_value */ | ||
12017 | 0xe000 | ||
12018 | }, | ||
12019 | { "bnz", TILE_SN_OPC_BNZ, | ||
12020 | 1 /* num_operands */, | ||
12021 | /* operands */ | ||
12022 | { 38 }, | ||
12023 | /* fixed_bit_mask */ | ||
12024 | 0xfc00, | ||
12025 | /* fixed_bit_value */ | ||
12026 | 0xe400 | ||
12027 | }, | ||
12028 | { "jrr", TILE_SN_OPC_JRR, | ||
12029 | 1 /* num_operands */, | ||
12030 | /* operands */ | ||
12031 | { 39 }, | ||
12032 | /* fixed_bit_mask */ | ||
12033 | 0xff00, | ||
12034 | /* fixed_bit_value */ | ||
12035 | 0x0600 | ||
12036 | }, | ||
12037 | { "fnop", TILE_SN_OPC_FNOP, | ||
12038 | 0 /* num_operands */, | ||
12039 | /* operands */ | ||
12040 | { 0, }, | ||
12041 | /* fixed_bit_mask */ | ||
12042 | 0xffff, | ||
12043 | /* fixed_bit_value */ | ||
12044 | 0x0003 | ||
12045 | }, | ||
12046 | { "blz", TILE_SN_OPC_BLZ, | ||
12047 | 1 /* num_operands */, | ||
12048 | /* operands */ | ||
12049 | { 38 }, | ||
12050 | /* fixed_bit_mask */ | ||
12051 | 0xfc00, | ||
12052 | /* fixed_bit_value */ | ||
12053 | 0xf000 | ||
12054 | }, | ||
12055 | { "nop", TILE_SN_OPC_NOP, | ||
12056 | 0 /* num_operands */, | ||
12057 | /* operands */ | ||
12058 | { 0, }, | ||
12059 | /* fixed_bit_mask */ | ||
12060 | 0xffff, | ||
12061 | /* fixed_bit_value */ | ||
12062 | 0x0002 | ||
12063 | }, | ||
12064 | { "movei", TILE_SN_OPC_MOVEI, | ||
12065 | 1 /* num_operands */, | ||
12066 | /* operands */ | ||
12067 | { 40 }, | ||
12068 | /* fixed_bit_mask */ | ||
12069 | 0xff00, | ||
12070 | /* fixed_bit_value */ | ||
12071 | 0x0400 | ||
12072 | }, | ||
12073 | { "move", TILE_SN_OPC_MOVE, | ||
12074 | 2 /* num_operands */, | ||
12075 | /* operands */ | ||
12076 | { 41, 42 }, | ||
12077 | /* fixed_bit_mask */ | ||
12078 | 0xfff0, | ||
12079 | /* fixed_bit_value */ | ||
12080 | 0x0080 | ||
12081 | }, | ||
12082 | { "bgez", TILE_SN_OPC_BGEZ, | ||
12083 | 1 /* num_operands */, | ||
12084 | /* operands */ | ||
12085 | { 38 }, | ||
12086 | /* fixed_bit_mask */ | ||
12087 | 0xfc00, | ||
12088 | /* fixed_bit_value */ | ||
12089 | 0xf400 | ||
12090 | }, | ||
12091 | { "jr", TILE_SN_OPC_JR, | ||
12092 | 1 /* num_operands */, | ||
12093 | /* operands */ | ||
12094 | { 42 }, | ||
12095 | /* fixed_bit_mask */ | ||
12096 | 0xfff0, | ||
12097 | /* fixed_bit_value */ | ||
12098 | 0x0040 | ||
12099 | }, | ||
12100 | { "blez", TILE_SN_OPC_BLEZ, | ||
12101 | 1 /* num_operands */, | ||
12102 | /* operands */ | ||
12103 | { 38 }, | ||
12104 | /* fixed_bit_mask */ | ||
12105 | 0xfc00, | ||
12106 | /* fixed_bit_value */ | ||
12107 | 0xec00 | ||
12108 | }, | ||
12109 | { "bbns", TILE_SN_OPC_BBNS, | ||
12110 | 1 /* num_operands */, | ||
12111 | /* operands */ | ||
12112 | { 38 }, | ||
12113 | /* fixed_bit_mask */ | ||
12114 | 0xfc00, | ||
12115 | /* fixed_bit_value */ | ||
12116 | 0xfc00 | ||
12117 | }, | ||
12118 | { "jalrr", TILE_SN_OPC_JALRR, | ||
12119 | 1 /* num_operands */, | ||
12120 | /* operands */ | ||
12121 | { 39 }, | ||
12122 | /* fixed_bit_mask */ | ||
12123 | 0xff00, | ||
12124 | /* fixed_bit_value */ | ||
12125 | 0x0700 | ||
12126 | }, | ||
12127 | { "bpt", TILE_SN_OPC_BPT, | ||
12128 | 0 /* num_operands */, | ||
12129 | /* operands */ | ||
12130 | { 0, }, | ||
12131 | /* fixed_bit_mask */ | ||
12132 | 0xffff, | ||
12133 | /* fixed_bit_value */ | ||
12134 | 0x0001 | ||
12135 | }, | ||
12136 | { "jalr", TILE_SN_OPC_JALR, | ||
12137 | 1 /* num_operands */, | ||
12138 | /* operands */ | ||
12139 | { 42 }, | ||
12140 | /* fixed_bit_mask */ | ||
12141 | 0xfff0, | ||
12142 | /* fixed_bit_value */ | ||
12143 | 0x0050 | ||
12144 | }, | ||
12145 | { "shr1", TILE_SN_OPC_SHR1, | ||
12146 | 2 /* num_operands */, | ||
12147 | /* operands */ | ||
12148 | { 41, 42 }, | ||
12149 | /* fixed_bit_mask */ | ||
12150 | 0xfff0, | ||
12151 | /* fixed_bit_value */ | ||
12152 | 0x0090 | ||
12153 | }, | ||
12154 | { "bgz", TILE_SN_OPC_BGZ, | ||
12155 | 1 /* num_operands */, | ||
12156 | /* operands */ | ||
12157 | { 38 }, | ||
12158 | /* fixed_bit_mask */ | ||
12159 | 0xfc00, | ||
12160 | /* fixed_bit_value */ | ||
12161 | 0xe800 | ||
12162 | }, | ||
12163 | { "bbs", TILE_SN_OPC_BBS, | ||
12164 | 1 /* num_operands */, | ||
12165 | /* operands */ | ||
12166 | { 38 }, | ||
12167 | /* fixed_bit_mask */ | ||
12168 | 0xfc00, | ||
12169 | /* fixed_bit_value */ | ||
12170 | 0xf800 | ||
12171 | }, | ||
12172 | { "shl8ii", TILE_SN_OPC_SHL8II, | ||
12173 | 1 /* num_operands */, | ||
12174 | /* operands */ | ||
12175 | { 39 }, | ||
12176 | /* fixed_bit_mask */ | ||
12177 | 0xff00, | ||
12178 | /* fixed_bit_value */ | ||
12179 | 0x0300 | ||
12180 | }, | ||
12181 | { "addi", TILE_SN_OPC_ADDI, | ||
12182 | 1 /* num_operands */, | ||
12183 | /* operands */ | ||
12184 | { 40 }, | ||
12185 | /* fixed_bit_mask */ | ||
12186 | 0xff00, | ||
12187 | /* fixed_bit_value */ | ||
12188 | 0x0500 | ||
12189 | }, | ||
12190 | { "halt", TILE_SN_OPC_HALT, | ||
12191 | 0 /* num_operands */, | ||
12192 | /* operands */ | ||
12193 | { 0, }, | ||
12194 | /* fixed_bit_mask */ | ||
12195 | 0xffff, | ||
12196 | /* fixed_bit_value */ | ||
12197 | 0x0000 | ||
12198 | }, | ||
12199 | { "route", TILE_SN_OPC_ROUTE, 0, { 0, }, 0, 0, | ||
12200 | }, | ||
12201 | { 0, TILE_SN_OPC_NONE, 0, { 0, }, 0, 0, | ||
12202 | } | ||
12203 | }; | ||
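/*
 * Illustrative sketch: matching a 16-bit static-network instruction word
 * against tile_sn_opcodes[] above.  The member names (name, fixed_bit_mask,
 * fixed_bit_value) are assumed from the inline comments in the
 * initializers; the table is terminated by an entry whose name is NULL,
 * and the all-zero mask on "route" makes it a catch-all for anything the
 * earlier entries did not claim.  Example only, not kernel code.
 */
static inline const struct tile_sn_opcode *
example_sn_match(unsigned short insn)
{
	const struct tile_sn_opcode *op;

	for (op = tile_sn_opcodes; op->name != NULL; op++) {
		/* An entry matches when all of its fixed bits agree. */
		if ((insn & op->fixed_bit_mask) == op->fixed_bit_value)
			return op;
	}

	return NULL;
}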
12204 | const unsigned char tile_sn_route_encode[6 * 6 * 6] = | ||
12205 | { | ||
12206 | 0xdf, | ||
12207 | 0xde, | ||
12208 | 0xdd, | ||
12209 | 0xdc, | ||
12210 | 0xdb, | ||
12211 | 0xda, | ||
12212 | 0xb9, | ||
12213 | 0xb8, | ||
12214 | 0xa1, | ||
12215 | 0xa0, | ||
12216 | 0x11, | ||
12217 | 0x10, | ||
12218 | 0x9f, | ||
12219 | 0x9e, | ||
12220 | 0x9d, | ||
12221 | 0x9c, | ||
12222 | 0x9b, | ||
12223 | 0x9a, | ||
12224 | 0x79, | ||
12225 | 0x78, | ||
12226 | 0x61, | ||
12227 | 0x60, | ||
12228 | 0xb, | ||
12229 | 0xa, | ||
12230 | 0x5f, | ||
12231 | 0x5e, | ||
12232 | 0x5d, | ||
12233 | 0x5c, | ||
12234 | 0x5b, | ||
12235 | 0x5a, | ||
12236 | 0x1f, | ||
12237 | 0x1e, | ||
12238 | 0x1d, | ||
12239 | 0x1c, | ||
12240 | 0x1b, | ||
12241 | 0x1a, | ||
12242 | 0xd7, | ||
12243 | 0xd6, | ||
12244 | 0xd5, | ||
12245 | 0xd4, | ||
12246 | 0xd3, | ||
12247 | 0xd2, | ||
12248 | 0xa7, | ||
12249 | 0xa6, | ||
12250 | 0xb1, | ||
12251 | 0xb0, | ||
12252 | 0x13, | ||
12253 | 0x12, | ||
12254 | 0x97, | ||
12255 | 0x96, | ||
12256 | 0x95, | ||
12257 | 0x94, | ||
12258 | 0x93, | ||
12259 | 0x92, | ||
12260 | 0x67, | ||
12261 | 0x66, | ||
12262 | 0x71, | ||
12263 | 0x70, | ||
12264 | 0x9, | ||
12265 | 0x8, | ||
12266 | 0x57, | ||
12267 | 0x56, | ||
12268 | 0x55, | ||
12269 | 0x54, | ||
12270 | 0x53, | ||
12271 | 0x52, | ||
12272 | 0x17, | ||
12273 | 0x16, | ||
12274 | 0x15, | ||
12275 | 0x14, | ||
12276 | 0x19, | ||
12277 | 0x18, | ||
12278 | 0xcf, | ||
12279 | 0xce, | ||
12280 | 0xcd, | ||
12281 | 0xcc, | ||
12282 | 0xcb, | ||
12283 | 0xca, | ||
12284 | 0xaf, | ||
12285 | 0xae, | ||
12286 | 0xad, | ||
12287 | 0xac, | ||
12288 | 0xab, | ||
12289 | 0xaa, | ||
12290 | 0x8f, | ||
12291 | 0x8e, | ||
12292 | 0x8d, | ||
12293 | 0x8c, | ||
12294 | 0x8b, | ||
12295 | 0x8a, | ||
12296 | 0x6f, | ||
12297 | 0x6e, | ||
12298 | 0x6d, | ||
12299 | 0x6c, | ||
12300 | 0x6b, | ||
12301 | 0x6a, | ||
12302 | 0x4f, | ||
12303 | 0x4e, | ||
12304 | 0x4d, | ||
12305 | 0x4c, | ||
12306 | 0x4b, | ||
12307 | 0x4a, | ||
12308 | 0x2f, | ||
12309 | 0x2e, | ||
12310 | 0x2d, | ||
12311 | 0x2c, | ||
12312 | 0x2b, | ||
12313 | 0x2a, | ||
12314 | 0xc9, | ||
12315 | 0xc8, | ||
12316 | 0xc5, | ||
12317 | 0xc4, | ||
12318 | 0xc3, | ||
12319 | 0xc2, | ||
12320 | 0xa9, | ||
12321 | 0xa8, | ||
12322 | 0xa5, | ||
12323 | 0xa4, | ||
12324 | 0xa3, | ||
12325 | 0xa2, | ||
12326 | 0x89, | ||
12327 | 0x88, | ||
12328 | 0x85, | ||
12329 | 0x84, | ||
12330 | 0x83, | ||
12331 | 0x82, | ||
12332 | 0x69, | ||
12333 | 0x68, | ||
12334 | 0x65, | ||
12335 | 0x64, | ||
12336 | 0x63, | ||
12337 | 0x62, | ||
12338 | 0x47, | ||
12339 | 0x46, | ||
12340 | 0x45, | ||
12341 | 0x44, | ||
12342 | 0x43, | ||
12343 | 0x42, | ||
12344 | 0x27, | ||
12345 | 0x26, | ||
12346 | 0x25, | ||
12347 | 0x24, | ||
12348 | 0x23, | ||
12349 | 0x22, | ||
12350 | 0xd9, | ||
12351 | 0xd8, | ||
12352 | 0xc1, | ||
12353 | 0xc0, | ||
12354 | 0x3b, | ||
12355 | 0x3a, | ||
12356 | 0xbf, | ||
12357 | 0xbe, | ||
12358 | 0xbd, | ||
12359 | 0xbc, | ||
12360 | 0xbb, | ||
12361 | 0xba, | ||
12362 | 0x99, | ||
12363 | 0x98, | ||
12364 | 0x81, | ||
12365 | 0x80, | ||
12366 | 0x31, | ||
12367 | 0x30, | ||
12368 | 0x7f, | ||
12369 | 0x7e, | ||
12370 | 0x7d, | ||
12371 | 0x7c, | ||
12372 | 0x7b, | ||
12373 | 0x7a, | ||
12374 | 0x59, | ||
12375 | 0x58, | ||
12376 | 0x3d, | ||
12377 | 0x3c, | ||
12378 | 0x49, | ||
12379 | 0x48, | ||
12380 | 0xf, | ||
12381 | 0xe, | ||
12382 | 0xd, | ||
12383 | 0xc, | ||
12384 | 0x29, | ||
12385 | 0x28, | ||
12386 | 0xc7, | ||
12387 | 0xc6, | ||
12388 | 0xd1, | ||
12389 | 0xd0, | ||
12390 | 0x39, | ||
12391 | 0x38, | ||
12392 | 0xb7, | ||
12393 | 0xb6, | ||
12394 | 0xb5, | ||
12395 | 0xb4, | ||
12396 | 0xb3, | ||
12397 | 0xb2, | ||
12398 | 0x87, | ||
12399 | 0x86, | ||
12400 | 0x91, | ||
12401 | 0x90, | ||
12402 | 0x33, | ||
12403 | 0x32, | ||
12404 | 0x77, | ||
12405 | 0x76, | ||
12406 | 0x75, | ||
12407 | 0x74, | ||
12408 | 0x73, | ||
12409 | 0x72, | ||
12410 | 0x3f, | ||
12411 | 0x3e, | ||
12412 | 0x51, | ||
12413 | 0x50, | ||
12414 | 0x41, | ||
12415 | 0x40, | ||
12416 | 0x37, | ||
12417 | 0x36, | ||
12418 | 0x35, | ||
12419 | 0x34, | ||
12420 | 0x21, | ||
12421 | 0x20 | ||
12422 | }; | ||
12423 | |||
12424 | const signed char tile_sn_route_decode[256][3] = | ||
12425 | { | ||
12426 | { -1, -1, -1 }, | ||
12427 | { -1, -1, -1 }, | ||
12428 | { -1, -1, -1 }, | ||
12429 | { -1, -1, -1 }, | ||
12430 | { -1, -1, -1 }, | ||
12431 | { -1, -1, -1 }, | ||
12432 | { -1, -1, -1 }, | ||
12433 | { -1, -1, -1 }, | ||
12434 | { 5, 3, 1 }, | ||
12435 | { 4, 3, 1 }, | ||
12436 | { 5, 3, 0 }, | ||
12437 | { 4, 3, 0 }, | ||
12438 | { 3, 5, 4 }, | ||
12439 | { 2, 5, 4 }, | ||
12440 | { 1, 5, 4 }, | ||
12441 | { 0, 5, 4 }, | ||
12442 | { 5, 1, 0 }, | ||
12443 | { 4, 1, 0 }, | ||
12444 | { 5, 1, 1 }, | ||
12445 | { 4, 1, 1 }, | ||
12446 | { 3, 5, 1 }, | ||
12447 | { 2, 5, 1 }, | ||
12448 | { 1, 5, 1 }, | ||
12449 | { 0, 5, 1 }, | ||
12450 | { 5, 5, 1 }, | ||
12451 | { 4, 5, 1 }, | ||
12452 | { 5, 5, 0 }, | ||
12453 | { 4, 5, 0 }, | ||
12454 | { 3, 5, 0 }, | ||
12455 | { 2, 5, 0 }, | ||
12456 | { 1, 5, 0 }, | ||
12457 | { 0, 5, 0 }, | ||
12458 | { 5, 5, 5 }, | ||
12459 | { 4, 5, 5 }, | ||
12460 | { 5, 5, 3 }, | ||
12461 | { 4, 5, 3 }, | ||
12462 | { 3, 5, 3 }, | ||
12463 | { 2, 5, 3 }, | ||
12464 | { 1, 5, 3 }, | ||
12465 | { 0, 5, 3 }, | ||
12466 | { 5, 5, 4 }, | ||
12467 | { 4, 5, 4 }, | ||
12468 | { 5, 5, 2 }, | ||
12469 | { 4, 5, 2 }, | ||
12470 | { 3, 5, 2 }, | ||
12471 | { 2, 5, 2 }, | ||
12472 | { 1, 5, 2 }, | ||
12473 | { 0, 5, 2 }, | ||
12474 | { 5, 2, 4 }, | ||
12475 | { 4, 2, 4 }, | ||
12476 | { 5, 2, 5 }, | ||
12477 | { 4, 2, 5 }, | ||
12478 | { 3, 5, 5 }, | ||
12479 | { 2, 5, 5 }, | ||
12480 | { 1, 5, 5 }, | ||
12481 | { 0, 5, 5 }, | ||
12482 | { 5, 0, 5 }, | ||
12483 | { 4, 0, 5 }, | ||
12484 | { 5, 0, 4 }, | ||
12485 | { 4, 0, 4 }, | ||
12486 | { 3, 4, 4 }, | ||
12487 | { 2, 4, 4 }, | ||
12488 | { 1, 4, 5 }, | ||
12489 | { 0, 4, 5 }, | ||
12490 | { 5, 4, 5 }, | ||
12491 | { 4, 4, 5 }, | ||
12492 | { 5, 4, 3 }, | ||
12493 | { 4, 4, 3 }, | ||
12494 | { 3, 4, 3 }, | ||
12495 | { 2, 4, 3 }, | ||
12496 | { 1, 4, 3 }, | ||
12497 | { 0, 4, 3 }, | ||
12498 | { 5, 4, 4 }, | ||
12499 | { 4, 4, 4 }, | ||
12500 | { 5, 4, 2 }, | ||
12501 | { 4, 4, 2 }, | ||
12502 | { 3, 4, 2 }, | ||
12503 | { 2, 4, 2 }, | ||
12504 | { 1, 4, 2 }, | ||
12505 | { 0, 4, 2 }, | ||
12506 | { 3, 4, 5 }, | ||
12507 | { 2, 4, 5 }, | ||
12508 | { 5, 4, 1 }, | ||
12509 | { 4, 4, 1 }, | ||
12510 | { 3, 4, 1 }, | ||
12511 | { 2, 4, 1 }, | ||
12512 | { 1, 4, 1 }, | ||
12513 | { 0, 4, 1 }, | ||
12514 | { 1, 4, 4 }, | ||
12515 | { 0, 4, 4 }, | ||
12516 | { 5, 4, 0 }, | ||
12517 | { 4, 4, 0 }, | ||
12518 | { 3, 4, 0 }, | ||
12519 | { 2, 4, 0 }, | ||
12520 | { 1, 4, 0 }, | ||
12521 | { 0, 4, 0 }, | ||
12522 | { 3, 3, 0 }, | ||
12523 | { 2, 3, 0 }, | ||
12524 | { 5, 3, 3 }, | ||
12525 | { 4, 3, 3 }, | ||
12526 | { 3, 3, 3 }, | ||
12527 | { 2, 3, 3 }, | ||
12528 | { 1, 3, 1 }, | ||
12529 | { 0, 3, 1 }, | ||
12530 | { 1, 3, 3 }, | ||
12531 | { 0, 3, 3 }, | ||
12532 | { 5, 3, 2 }, | ||
12533 | { 4, 3, 2 }, | ||
12534 | { 3, 3, 2 }, | ||
12535 | { 2, 3, 2 }, | ||
12536 | { 1, 3, 2 }, | ||
12537 | { 0, 3, 2 }, | ||
12538 | { 3, 3, 1 }, | ||
12539 | { 2, 3, 1 }, | ||
12540 | { 5, 3, 5 }, | ||
12541 | { 4, 3, 5 }, | ||
12542 | { 3, 3, 5 }, | ||
12543 | { 2, 3, 5 }, | ||
12544 | { 1, 3, 5 }, | ||
12545 | { 0, 3, 5 }, | ||
12546 | { 1, 3, 0 }, | ||
12547 | { 0, 3, 0 }, | ||
12548 | { 5, 3, 4 }, | ||
12549 | { 4, 3, 4 }, | ||
12550 | { 3, 3, 4 }, | ||
12551 | { 2, 3, 4 }, | ||
12552 | { 1, 3, 4 }, | ||
12553 | { 0, 3, 4 }, | ||
12554 | { 3, 2, 4 }, | ||
12555 | { 2, 2, 4 }, | ||
12556 | { 5, 2, 3 }, | ||
12557 | { 4, 2, 3 }, | ||
12558 | { 3, 2, 3 }, | ||
12559 | { 2, 2, 3 }, | ||
12560 | { 1, 2, 5 }, | ||
12561 | { 0, 2, 5 }, | ||
12562 | { 1, 2, 3 }, | ||
12563 | { 0, 2, 3 }, | ||
12564 | { 5, 2, 2 }, | ||
12565 | { 4, 2, 2 }, | ||
12566 | { 3, 2, 2 }, | ||
12567 | { 2, 2, 2 }, | ||
12568 | { 1, 2, 2 }, | ||
12569 | { 0, 2, 2 }, | ||
12570 | { 3, 2, 5 }, | ||
12571 | { 2, 2, 5 }, | ||
12572 | { 5, 2, 1 }, | ||
12573 | { 4, 2, 1 }, | ||
12574 | { 3, 2, 1 }, | ||
12575 | { 2, 2, 1 }, | ||
12576 | { 1, 2, 1 }, | ||
12577 | { 0, 2, 1 }, | ||
12578 | { 1, 2, 4 }, | ||
12579 | { 0, 2, 4 }, | ||
12580 | { 5, 2, 0 }, | ||
12581 | { 4, 2, 0 }, | ||
12582 | { 3, 2, 0 }, | ||
12583 | { 2, 2, 0 }, | ||
12584 | { 1, 2, 0 }, | ||
12585 | { 0, 2, 0 }, | ||
12586 | { 3, 1, 0 }, | ||
12587 | { 2, 1, 0 }, | ||
12588 | { 5, 1, 3 }, | ||
12589 | { 4, 1, 3 }, | ||
12590 | { 3, 1, 3 }, | ||
12591 | { 2, 1, 3 }, | ||
12592 | { 1, 1, 1 }, | ||
12593 | { 0, 1, 1 }, | ||
12594 | { 1, 1, 3 }, | ||
12595 | { 0, 1, 3 }, | ||
12596 | { 5, 1, 2 }, | ||
12597 | { 4, 1, 2 }, | ||
12598 | { 3, 1, 2 }, | ||
12599 | { 2, 1, 2 }, | ||
12600 | { 1, 1, 2 }, | ||
12601 | { 0, 1, 2 }, | ||
12602 | { 3, 1, 1 }, | ||
12603 | { 2, 1, 1 }, | ||
12604 | { 5, 1, 5 }, | ||
12605 | { 4, 1, 5 }, | ||
12606 | { 3, 1, 5 }, | ||
12607 | { 2, 1, 5 }, | ||
12608 | { 1, 1, 5 }, | ||
12609 | { 0, 1, 5 }, | ||
12610 | { 1, 1, 0 }, | ||
12611 | { 0, 1, 0 }, | ||
12612 | { 5, 1, 4 }, | ||
12613 | { 4, 1, 4 }, | ||
12614 | { 3, 1, 4 }, | ||
12615 | { 2, 1, 4 }, | ||
12616 | { 1, 1, 4 }, | ||
12617 | { 0, 1, 4 }, | ||
12618 | { 3, 0, 4 }, | ||
12619 | { 2, 0, 4 }, | ||
12620 | { 5, 0, 3 }, | ||
12621 | { 4, 0, 3 }, | ||
12622 | { 3, 0, 3 }, | ||
12623 | { 2, 0, 3 }, | ||
12624 | { 1, 0, 5 }, | ||
12625 | { 0, 0, 5 }, | ||
12626 | { 1, 0, 3 }, | ||
12627 | { 0, 0, 3 }, | ||
12628 | { 5, 0, 2 }, | ||
12629 | { 4, 0, 2 }, | ||
12630 | { 3, 0, 2 }, | ||
12631 | { 2, 0, 2 }, | ||
12632 | { 1, 0, 2 }, | ||
12633 | { 0, 0, 2 }, | ||
12634 | { 3, 0, 5 }, | ||
12635 | { 2, 0, 5 }, | ||
12636 | { 5, 0, 1 }, | ||
12637 | { 4, 0, 1 }, | ||
12638 | { 3, 0, 1 }, | ||
12639 | { 2, 0, 1 }, | ||
12640 | { 1, 0, 1 }, | ||
12641 | { 0, 0, 1 }, | ||
12642 | { 1, 0, 4 }, | ||
12643 | { 0, 0, 4 }, | ||
12644 | { 5, 0, 0 }, | ||
12645 | { 4, 0, 0 }, | ||
12646 | { 3, 0, 0 }, | ||
12647 | { 2, 0, 0 }, | ||
12648 | { 1, 0, 0 }, | ||
12649 | { 0, 0, 0 }, | ||
12650 | { -1, -1, -1 }, | ||
12651 | { -1, -1, -1 }, | ||
12652 | { -1, -1, -1 }, | ||
12653 | { -1, -1, -1 }, | ||
12654 | { -1, -1, -1 }, | ||
12655 | { -1, -1, -1 }, | ||
12656 | { -1, -1, -1 }, | ||
12657 | { -1, -1, -1 }, | ||
12658 | { -1, -1, -1 }, | ||
12659 | { -1, -1, -1 }, | ||
12660 | { -1, -1, -1 }, | ||
12661 | { -1, -1, -1 }, | ||
12662 | { -1, -1, -1 }, | ||
12663 | { -1, -1, -1 }, | ||
12664 | { -1, -1, -1 }, | ||
12665 | { -1, -1, -1 }, | ||
12666 | { -1, -1, -1 }, | ||
12667 | { -1, -1, -1 }, | ||
12668 | { -1, -1, -1 }, | ||
12669 | { -1, -1, -1 }, | ||
12670 | { -1, -1, -1 }, | ||
12671 | { -1, -1, -1 }, | ||
12672 | { -1, -1, -1 }, | ||
12673 | { -1, -1, -1 }, | ||
12674 | { -1, -1, -1 }, | ||
12675 | { -1, -1, -1 }, | ||
12676 | { -1, -1, -1 }, | ||
12677 | { -1, -1, -1 }, | ||
12678 | { -1, -1, -1 }, | ||
12679 | { -1, -1, -1 }, | ||
12680 | { -1, -1, -1 }, | ||
12681 | { -1, -1, -1 } | ||
12682 | }; | ||
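/*
 * Illustrative consistency check: the 6*6*6 tile_sn_route_encode[] table
 * and the 256-entry tile_sn_route_decode[] table above appear to be
 * inverses, with the encode index laid out as (c * 36 + b * 6 + a) for a
 * decoded triple { a, b, c } (unused byte values decode to { -1, -1, -1 }).
 * The layout is inferred from the data, so treat this helper as a
 * hypothetical self-test sketch rather than code the kernel uses.
 */
static inline int example_sn_route_tables_consistent(void)
{
	int a, b, c;

	for (c = 0; c < 6; c++)
		for (b = 0; b < 6; b++)
			for (a = 0; a < 6; a++) {
				unsigned char byte =
					tile_sn_route_encode[c * 36 + b * 6 + a];
				const signed char *t = tile_sn_route_decode[byte];

				if (t[0] != a || t[1] != b || t[2] != c)
					return 0;	/* tables disagree */
			}

	return 1;
}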
12683 | |||
12684 | const char tile_sn_direction_names[6][5] = | ||
12685 | { | ||
12686 | "w", | ||
12687 | "c", | ||
12688 | "acc", | ||
12689 | "n", | ||
12690 | "e", | ||
12691 | "s" | ||
12692 | }; | ||
12693 | |||
12694 | const signed char tile_sn_dest_map[6][6] = { | ||
12695 | { -1, 3, 4, 5, 1, 2 } /* val -> w */, | ||
12696 | { -1, 3, 4, 5, 0, 2 } /* val -> c */, | ||
12697 | { -1, 3, 4, 5, 0, 1 } /* val -> acc */, | ||
12698 | { -1, 4, 5, 0, 1, 2 } /* val -> n */, | ||
12699 | { -1, 3, 5, 0, 1, 2 } /* val -> e */, | ||
12700 | { -1, 3, 4, 0, 1, 2 } /* val -> s */ | ||
12701 | }; | ||
12702 | |||
12703 | const struct tile_operand tile_operands[43] = | ||
12704 | { | ||
12705 | { | ||
12706 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
12707 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_IMM8_X0), /* default_reloc */ | ||
12708 | 8, /* num_bits */ | ||
12709 | 1, /* is_signed */ | ||
12710 | 0, /* is_src_reg */ | ||
12711 | 0, /* is_dest_reg */ | ||
12712 | 0, /* is_pc_relative */ | ||
12713 | 0, /* rightshift */ | ||
12714 | create_Imm8_X0, /* insert */ | ||
12715 | get_Imm8_X0 /* extract */ | ||
12716 | }, | ||
12717 | { | ||
12718 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
12719 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_IMM8_X1), /* default_reloc */ | ||
12720 | 8, /* num_bits */ | ||
12721 | 1, /* is_signed */ | ||
12722 | 0, /* is_src_reg */ | ||
12723 | 0, /* is_dest_reg */ | ||
12724 | 0, /* is_pc_relative */ | ||
12725 | 0, /* rightshift */ | ||
12726 | create_Imm8_X1, /* insert */ | ||
12727 | get_Imm8_X1 /* extract */ | ||
12728 | }, | ||
12729 | { | ||
12730 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
12731 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_IMM8_Y0), /* default_reloc */ | ||
12732 | 8, /* num_bits */ | ||
12733 | 1, /* is_signed */ | ||
12734 | 0, /* is_src_reg */ | ||
12735 | 0, /* is_dest_reg */ | ||
12736 | 0, /* is_pc_relative */ | ||
12737 | 0, /* rightshift */ | ||
12738 | create_Imm8_Y0, /* insert */ | ||
12739 | get_Imm8_Y0 /* extract */ | ||
12740 | }, | ||
12741 | { | ||
12742 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
12743 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_IMM8_Y1), /* default_reloc */ | ||
12744 | 8, /* num_bits */ | ||
12745 | 1, /* is_signed */ | ||
12746 | 0, /* is_src_reg */ | ||
12747 | 0, /* is_dest_reg */ | ||
12748 | 0, /* is_pc_relative */ | ||
12749 | 0, /* rightshift */ | ||
12750 | create_Imm8_Y1, /* insert */ | ||
12751 | get_Imm8_Y1 /* extract */ | ||
12752 | }, | ||
12753 | { | ||
12754 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
12755 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_IMM16_X0), /* default_reloc */ | ||
12756 | 16, /* num_bits */ | ||
12757 | 1, /* is_signed */ | ||
12758 | 0, /* is_src_reg */ | ||
12759 | 0, /* is_dest_reg */ | ||
12760 | 0, /* is_pc_relative */ | ||
12761 | 0, /* rightshift */ | ||
12762 | create_Imm16_X0, /* insert */ | ||
12763 | get_Imm16_X0 /* extract */ | ||
12764 | }, | ||
12765 | { | ||
12766 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
12767 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_IMM16_X1), /* default_reloc */ | ||
12768 | 16, /* num_bits */ | ||
12769 | 1, /* is_signed */ | ||
12770 | 0, /* is_src_reg */ | ||
12771 | 0, /* is_dest_reg */ | ||
12772 | 0, /* is_pc_relative */ | ||
12773 | 0, /* rightshift */ | ||
12774 | create_Imm16_X1, /* insert */ | ||
12775 | get_Imm16_X1 /* extract */ | ||
12776 | }, | ||
12777 | { | ||
12778 | TILE_OP_TYPE_ADDRESS, /* type */ | ||
12779 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_JOFFLONG_X1), /* default_reloc */ | ||
12780 | 29, /* num_bits */ | ||
12781 | 1, /* is_signed */ | ||
12782 | 0, /* is_src_reg */ | ||
12783 | 0, /* is_dest_reg */ | ||
12784 | 1, /* is_pc_relative */ | ||
12785 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, /* rightshift */ | ||
12786 | create_JOffLong_X1, /* insert */ | ||
12787 | get_JOffLong_X1 /* extract */ | ||
12788 | }, | ||
12789 | { | ||
12790 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12791 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12792 | 6, /* num_bits */ | ||
12793 | 0, /* is_signed */ | ||
12794 | 0, /* is_src_reg */ | ||
12795 | 1, /* is_dest_reg */ | ||
12796 | 0, /* is_pc_relative */ | ||
12797 | 0, /* rightshift */ | ||
12798 | create_Dest_X0, /* insert */ | ||
12799 | get_Dest_X0 /* extract */ | ||
12800 | }, | ||
12801 | { | ||
12802 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12803 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12804 | 6, /* num_bits */ | ||
12805 | 0, /* is_signed */ | ||
12806 | 1, /* is_src_reg */ | ||
12807 | 0, /* is_dest_reg */ | ||
12808 | 0, /* is_pc_relative */ | ||
12809 | 0, /* rightshift */ | ||
12810 | create_SrcA_X0, /* insert */ | ||
12811 | get_SrcA_X0 /* extract */ | ||
12812 | }, | ||
12813 | { | ||
12814 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12815 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12816 | 6, /* num_bits */ | ||
12817 | 0, /* is_signed */ | ||
12818 | 0, /* is_src_reg */ | ||
12819 | 1, /* is_dest_reg */ | ||
12820 | 0, /* is_pc_relative */ | ||
12821 | 0, /* rightshift */ | ||
12822 | create_Dest_X1, /* insert */ | ||
12823 | get_Dest_X1 /* extract */ | ||
12824 | }, | ||
12825 | { | ||
12826 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12827 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12828 | 6, /* num_bits */ | ||
12829 | 0, /* is_signed */ | ||
12830 | 1, /* is_src_reg */ | ||
12831 | 0, /* is_dest_reg */ | ||
12832 | 0, /* is_pc_relative */ | ||
12833 | 0, /* rightshift */ | ||
12834 | create_SrcA_X1, /* insert */ | ||
12835 | get_SrcA_X1 /* extract */ | ||
12836 | }, | ||
12837 | { | ||
12838 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12839 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12840 | 6, /* num_bits */ | ||
12841 | 0, /* is_signed */ | ||
12842 | 0, /* is_src_reg */ | ||
12843 | 1, /* is_dest_reg */ | ||
12844 | 0, /* is_pc_relative */ | ||
12845 | 0, /* rightshift */ | ||
12846 | create_Dest_Y0, /* insert */ | ||
12847 | get_Dest_Y0 /* extract */ | ||
12848 | }, | ||
12849 | { | ||
12850 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12851 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12852 | 6, /* num_bits */ | ||
12853 | 0, /* is_signed */ | ||
12854 | 1, /* is_src_reg */ | ||
12855 | 0, /* is_dest_reg */ | ||
12856 | 0, /* is_pc_relative */ | ||
12857 | 0, /* rightshift */ | ||
12858 | create_SrcA_Y0, /* insert */ | ||
12859 | get_SrcA_Y0 /* extract */ | ||
12860 | }, | ||
12861 | { | ||
12862 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12863 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12864 | 6, /* num_bits */ | ||
12865 | 0, /* is_signed */ | ||
12866 | 0, /* is_src_reg */ | ||
12867 | 1, /* is_dest_reg */ | ||
12868 | 0, /* is_pc_relative */ | ||
12869 | 0, /* rightshift */ | ||
12870 | create_Dest_Y1, /* insert */ | ||
12871 | get_Dest_Y1 /* extract */ | ||
12872 | }, | ||
12873 | { | ||
12874 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12875 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12876 | 6, /* num_bits */ | ||
12877 | 0, /* is_signed */ | ||
12878 | 1, /* is_src_reg */ | ||
12879 | 0, /* is_dest_reg */ | ||
12880 | 0, /* is_pc_relative */ | ||
12881 | 0, /* rightshift */ | ||
12882 | create_SrcA_Y1, /* insert */ | ||
12883 | get_SrcA_Y1 /* extract */ | ||
12884 | }, | ||
12885 | { | ||
12886 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12887 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12888 | 6, /* num_bits */ | ||
12889 | 0, /* is_signed */ | ||
12890 | 1, /* is_src_reg */ | ||
12891 | 0, /* is_dest_reg */ | ||
12892 | 0, /* is_pc_relative */ | ||
12893 | 0, /* rightshift */ | ||
12894 | create_SrcA_Y2, /* insert */ | ||
12895 | get_SrcA_Y2 /* extract */ | ||
12896 | }, | ||
12897 | { | ||
12898 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12899 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12900 | 6, /* num_bits */ | ||
12901 | 0, /* is_signed */ | ||
12902 | 1, /* is_src_reg */ | ||
12903 | 0, /* is_dest_reg */ | ||
12904 | 0, /* is_pc_relative */ | ||
12905 | 0, /* rightshift */ | ||
12906 | create_SrcB_X0, /* insert */ | ||
12907 | get_SrcB_X0 /* extract */ | ||
12908 | }, | ||
12909 | { | ||
12910 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12911 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12912 | 6, /* num_bits */ | ||
12913 | 0, /* is_signed */ | ||
12914 | 1, /* is_src_reg */ | ||
12915 | 0, /* is_dest_reg */ | ||
12916 | 0, /* is_pc_relative */ | ||
12917 | 0, /* rightshift */ | ||
12918 | create_SrcB_X1, /* insert */ | ||
12919 | get_SrcB_X1 /* extract */ | ||
12920 | }, | ||
12921 | { | ||
12922 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12923 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12924 | 6, /* num_bits */ | ||
12925 | 0, /* is_signed */ | ||
12926 | 1, /* is_src_reg */ | ||
12927 | 0, /* is_dest_reg */ | ||
12928 | 0, /* is_pc_relative */ | ||
12929 | 0, /* rightshift */ | ||
12930 | create_SrcB_Y0, /* insert */ | ||
12931 | get_SrcB_Y0 /* extract */ | ||
12932 | }, | ||
12933 | { | ||
12934 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12935 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12936 | 6, /* num_bits */ | ||
12937 | 0, /* is_signed */ | ||
12938 | 1, /* is_src_reg */ | ||
12939 | 0, /* is_dest_reg */ | ||
12940 | 0, /* is_pc_relative */ | ||
12941 | 0, /* rightshift */ | ||
12942 | create_SrcB_Y1, /* insert */ | ||
12943 | get_SrcB_Y1 /* extract */ | ||
12944 | }, | ||
12945 | { | ||
12946 | TILE_OP_TYPE_ADDRESS, /* type */ | ||
12947 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_BROFF_X1), /* default_reloc */ | ||
12948 | 17, /* num_bits */ | ||
12949 | 1, /* is_signed */ | ||
12950 | 0, /* is_src_reg */ | ||
12951 | 0, /* is_dest_reg */ | ||
12952 | 1, /* is_pc_relative */ | ||
12953 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, /* rightshift */ | ||
12954 | create_BrOff_X1, /* insert */ | ||
12955 | get_BrOff_X1 /* extract */ | ||
12956 | }, | ||
12957 | { | ||
12958 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12959 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12960 | 6, /* num_bits */ | ||
12961 | 0, /* is_signed */ | ||
12962 | 1, /* is_src_reg */ | ||
12963 | 1, /* is_dest_reg */ | ||
12964 | 0, /* is_pc_relative */ | ||
12965 | 0, /* rightshift */ | ||
12966 | create_Dest_X0, /* insert */ | ||
12967 | get_Dest_X0 /* extract */ | ||
12968 | }, | ||
12969 | { | ||
12970 | TILE_OP_TYPE_ADDRESS, /* type */ | ||
12971 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12972 | 28, /* num_bits */ | ||
12973 | 1, /* is_signed */ | ||
12974 | 0, /* is_src_reg */ | ||
12975 | 0, /* is_dest_reg */ | ||
12976 | 1, /* is_pc_relative */ | ||
12977 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES, /* rightshift */ | ||
12978 | create_JOff_X1, /* insert */ | ||
12979 | get_JOff_X1 /* extract */ | ||
12980 | }, | ||
12981 | { | ||
12982 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12983 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12984 | 6, /* num_bits */ | ||
12985 | 0, /* is_signed */ | ||
12986 | 0, /* is_src_reg */ | ||
12987 | 1, /* is_dest_reg */ | ||
12988 | 0, /* is_pc_relative */ | ||
12989 | 0, /* rightshift */ | ||
12990 | create_SrcBDest_Y2, /* insert */ | ||
12991 | get_SrcBDest_Y2 /* extract */ | ||
12992 | }, | ||
12993 | { | ||
12994 | TILE_OP_TYPE_REGISTER, /* type */ | ||
12995 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
12996 | 6, /* num_bits */ | ||
12997 | 0, /* is_signed */ | ||
12998 | 1, /* is_src_reg */ | ||
12999 | 1, /* is_dest_reg */ | ||
13000 | 0, /* is_pc_relative */ | ||
13001 | 0, /* rightshift */ | ||
13002 | create_SrcA_X1, /* insert */ | ||
13003 | get_SrcA_X1 /* extract */ | ||
13004 | }, | ||
13005 | { | ||
13006 | TILE_OP_TYPE_SPR, /* type */ | ||
13007 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_MF_IMM15_X1), /* default_reloc */ | ||
13008 | 15, /* num_bits */ | ||
13009 | 0, /* is_signed */ | ||
13010 | 0, /* is_src_reg */ | ||
13011 | 0, /* is_dest_reg */ | ||
13012 | 0, /* is_pc_relative */ | ||
13013 | 0, /* rightshift */ | ||
13014 | create_MF_Imm15_X1, /* insert */ | ||
13015 | get_MF_Imm15_X1 /* extract */ | ||
13016 | }, | ||
13017 | { | ||
13018 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13019 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_MMSTART_X0), /* default_reloc */ | ||
13020 | 5, /* num_bits */ | ||
13021 | 0, /* is_signed */ | ||
13022 | 0, /* is_src_reg */ | ||
13023 | 0, /* is_dest_reg */ | ||
13024 | 0, /* is_pc_relative */ | ||
13025 | 0, /* rightshift */ | ||
13026 | create_MMStart_X0, /* insert */ | ||
13027 | get_MMStart_X0 /* extract */ | ||
13028 | }, | ||
13029 | { | ||
13030 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13031 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_MMEND_X0), /* default_reloc */ | ||
13032 | 5, /* num_bits */ | ||
13033 | 0, /* is_signed */ | ||
13034 | 0, /* is_src_reg */ | ||
13035 | 0, /* is_dest_reg */ | ||
13036 | 0, /* is_pc_relative */ | ||
13037 | 0, /* rightshift */ | ||
13038 | create_MMEnd_X0, /* insert */ | ||
13039 | get_MMEnd_X0 /* extract */ | ||
13040 | }, | ||
13041 | { | ||
13042 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13043 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_MMSTART_X1), /* default_reloc */ | ||
13044 | 5, /* num_bits */ | ||
13045 | 0, /* is_signed */ | ||
13046 | 0, /* is_src_reg */ | ||
13047 | 0, /* is_dest_reg */ | ||
13048 | 0, /* is_pc_relative */ | ||
13049 | 0, /* rightshift */ | ||
13050 | create_MMStart_X1, /* insert */ | ||
13051 | get_MMStart_X1 /* extract */ | ||
13052 | }, | ||
13053 | { | ||
13054 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13055 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_MMEND_X1), /* default_reloc */ | ||
13056 | 5, /* num_bits */ | ||
13057 | 0, /* is_signed */ | ||
13058 | 0, /* is_src_reg */ | ||
13059 | 0, /* is_dest_reg */ | ||
13060 | 0, /* is_pc_relative */ | ||
13061 | 0, /* rightshift */ | ||
13062 | create_MMEnd_X1, /* insert */ | ||
13063 | get_MMEnd_X1 /* extract */ | ||
13064 | }, | ||
13065 | { | ||
13066 | TILE_OP_TYPE_SPR, /* type */ | ||
13067 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_MT_IMM15_X1), /* default_reloc */ | ||
13068 | 15, /* num_bits */ | ||
13069 | 0, /* is_signed */ | ||
13070 | 0, /* is_src_reg */ | ||
13071 | 0, /* is_dest_reg */ | ||
13072 | 0, /* is_pc_relative */ | ||
13073 | 0, /* rightshift */ | ||
13074 | create_MT_Imm15_X1, /* insert */ | ||
13075 | get_MT_Imm15_X1 /* extract */ | ||
13076 | }, | ||
13077 | { | ||
13078 | TILE_OP_TYPE_REGISTER, /* type */ | ||
13079 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
13080 | 6, /* num_bits */ | ||
13081 | 0, /* is_signed */ | ||
13082 | 1, /* is_src_reg */ | ||
13083 | 1, /* is_dest_reg */ | ||
13084 | 0, /* is_pc_relative */ | ||
13085 | 0, /* rightshift */ | ||
13086 | create_Dest_Y0, /* insert */ | ||
13087 | get_Dest_Y0 /* extract */ | ||
13088 | }, | ||
13089 | { | ||
13090 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13091 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_SHAMT_X0), /* default_reloc */ | ||
13092 | 5, /* num_bits */ | ||
13093 | 0, /* is_signed */ | ||
13094 | 0, /* is_src_reg */ | ||
13095 | 0, /* is_dest_reg */ | ||
13096 | 0, /* is_pc_relative */ | ||
13097 | 0, /* rightshift */ | ||
13098 | create_ShAmt_X0, /* insert */ | ||
13099 | get_ShAmt_X0 /* extract */ | ||
13100 | }, | ||
13101 | { | ||
13102 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13103 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_SHAMT_X1), /* default_reloc */ | ||
13104 | 5, /* num_bits */ | ||
13105 | 0, /* is_signed */ | ||
13106 | 0, /* is_src_reg */ | ||
13107 | 0, /* is_dest_reg */ | ||
13108 | 0, /* is_pc_relative */ | ||
13109 | 0, /* rightshift */ | ||
13110 | create_ShAmt_X1, /* insert */ | ||
13111 | get_ShAmt_X1 /* extract */ | ||
13112 | }, | ||
13113 | { | ||
13114 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13115 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_SHAMT_Y0), /* default_reloc */ | ||
13116 | 5, /* num_bits */ | ||
13117 | 0, /* is_signed */ | ||
13118 | 0, /* is_src_reg */ | ||
13119 | 0, /* is_dest_reg */ | ||
13120 | 0, /* is_pc_relative */ | ||
13121 | 0, /* rightshift */ | ||
13122 | create_ShAmt_Y0, /* insert */ | ||
13123 | get_ShAmt_Y0 /* extract */ | ||
13124 | }, | ||
13125 | { | ||
13126 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13127 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_SHAMT_Y1), /* default_reloc */ | ||
13128 | 5, /* num_bits */ | ||
13129 | 0, /* is_signed */ | ||
13130 | 0, /* is_src_reg */ | ||
13131 | 0, /* is_dest_reg */ | ||
13132 | 0, /* is_pc_relative */ | ||
13133 | 0, /* rightshift */ | ||
13134 | create_ShAmt_Y1, /* insert */ | ||
13135 | get_ShAmt_Y1 /* extract */ | ||
13136 | }, | ||
13137 | { | ||
13138 | TILE_OP_TYPE_REGISTER, /* type */ | ||
13139 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
13140 | 6, /* num_bits */ | ||
13141 | 0, /* is_signed */ | ||
13142 | 1, /* is_src_reg */ | ||
13143 | 0, /* is_dest_reg */ | ||
13144 | 0, /* is_pc_relative */ | ||
13145 | 0, /* rightshift */ | ||
13146 | create_SrcBDest_Y2, /* insert */ | ||
13147 | get_SrcBDest_Y2 /* extract */ | ||
13148 | }, | ||
13149 | { | ||
13150 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13151 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
13152 | 8, /* num_bits */ | ||
13153 | 1, /* is_signed */ | ||
13154 | 0, /* is_src_reg */ | ||
13155 | 0, /* is_dest_reg */ | ||
13156 | 0, /* is_pc_relative */ | ||
13157 | 0, /* rightshift */ | ||
13158 | create_Dest_Imm8_X1, /* insert */ | ||
13159 | get_Dest_Imm8_X1 /* extract */ | ||
13160 | }, | ||
13161 | { | ||
13162 | TILE_OP_TYPE_ADDRESS, /* type */ | ||
13163 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_SN_BROFF), /* default_reloc */ | ||
13164 | 10, /* num_bits */ | ||
13165 | 1, /* is_signed */ | ||
13166 | 0, /* is_src_reg */ | ||
13167 | 0, /* is_dest_reg */ | ||
13168 | 1, /* is_pc_relative */ | ||
13169 | TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES, /* rightshift */ | ||
13170 | create_BrOff_SN, /* insert */ | ||
13171 | get_BrOff_SN /* extract */ | ||
13172 | }, | ||
13173 | { | ||
13174 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13175 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_SN_UIMM8), /* default_reloc */ | ||
13176 | 8, /* num_bits */ | ||
13177 | 0, /* is_signed */ | ||
13178 | 0, /* is_src_reg */ | ||
13179 | 0, /* is_dest_reg */ | ||
13180 | 0, /* is_pc_relative */ | ||
13181 | 0, /* rightshift */ | ||
13182 | create_Imm8_SN, /* insert */ | ||
13183 | get_Imm8_SN /* extract */ | ||
13184 | }, | ||
13185 | { | ||
13186 | TILE_OP_TYPE_IMMEDIATE, /* type */ | ||
13187 | MAYBE_BFD_RELOC(BFD_RELOC_TILE_SN_IMM8), /* default_reloc */ | ||
13188 | 8, /* num_bits */ | ||
13189 | 1, /* is_signed */ | ||
13190 | 0, /* is_src_reg */ | ||
13191 | 0, /* is_dest_reg */ | ||
13192 | 0, /* is_pc_relative */ | ||
13193 | 0, /* rightshift */ | ||
13194 | create_Imm8_SN, /* insert */ | ||
13195 | get_Imm8_SN /* extract */ | ||
13196 | }, | ||
13197 | { | ||
13198 | TILE_OP_TYPE_REGISTER, /* type */ | ||
13199 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
13200 | 2, /* num_bits */ | ||
13201 | 0, /* is_signed */ | ||
13202 | 0, /* is_src_reg */ | ||
13203 | 1, /* is_dest_reg */ | ||
13204 | 0, /* is_pc_relative */ | ||
13205 | 0, /* rightshift */ | ||
13206 | create_Dest_SN, /* insert */ | ||
13207 | get_Dest_SN /* extract */ | ||
13208 | }, | ||
13209 | { | ||
13210 | TILE_OP_TYPE_REGISTER, /* type */ | ||
13211 | MAYBE_BFD_RELOC(BFD_RELOC_NONE), /* default_reloc */ | ||
13212 | 2, /* num_bits */ | ||
13213 | 0, /* is_signed */ | ||
13214 | 1, /* is_src_reg */ | ||
13215 | 0, /* is_dest_reg */ | ||
13216 | 0, /* is_pc_relative */ | ||
13217 | 0, /* rightshift */ | ||
13218 | create_Src_SN, /* insert */ | ||
13219 | get_Src_SN /* extract */ | ||
13220 | } | ||
13221 | }; | ||
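/*
 * Illustrative sketch: how an entry of tile_operands[] might be exercised.
 * It assumes the last two members are the insert/extract function pointers
 * annotated above (the create_*/get_* helpers defined earlier in this
 * file), with insert() taking the operand value and extract() returning
 * the raw unsigned field; sign extension of is_signed operands is left to
 * the caller.  Example only, not kernel code.
 */
static inline unsigned int
example_operand_roundtrip(const struct tile_operand *op, int value)
{
	/* Encode the operand into an otherwise-empty bundle... */
	tile_bundle_bits bits = op->insert(value);

	/* ...then pull the raw field back out of those bits. */
	return op->extract(bits);
}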
13222 | |||
13223 | const struct tile_spr tile_sprs[] = { | ||
13224 | { 0, "MPL_ITLB_MISS_SET_0" }, | ||
13225 | { 1, "MPL_ITLB_MISS_SET_1" }, | ||
13226 | { 2, "MPL_ITLB_MISS_SET_2" }, | ||
13227 | { 3, "MPL_ITLB_MISS_SET_3" }, | ||
13228 | { 4, "MPL_ITLB_MISS" }, | ||
13229 | { 256, "ITLB_CURRENT_0" }, | ||
13230 | { 257, "ITLB_CURRENT_1" }, | ||
13231 | { 258, "ITLB_CURRENT_2" }, | ||
13232 | { 259, "ITLB_CURRENT_3" }, | ||
13233 | { 260, "ITLB_INDEX" }, | ||
13234 | { 261, "ITLB_MATCH_0" }, | ||
13235 | { 262, "ITLB_PR" }, | ||
13236 | { 263, "NUMBER_ITLB" }, | ||
13237 | { 264, "REPLACEMENT_ITLB" }, | ||
13238 | { 265, "WIRED_ITLB" }, | ||
13239 | { 266, "ITLB_PERF" }, | ||
13240 | { 512, "MPL_MEM_ERROR_SET_0" }, | ||
13241 | { 513, "MPL_MEM_ERROR_SET_1" }, | ||
13242 | { 514, "MPL_MEM_ERROR_SET_2" }, | ||
13243 | { 515, "MPL_MEM_ERROR_SET_3" }, | ||
13244 | { 516, "MPL_MEM_ERROR" }, | ||
13245 | { 517, "L1_I_ERROR" }, | ||
13246 | { 518, "MEM_ERROR_CBOX_ADDR" }, | ||
13247 | { 519, "MEM_ERROR_CBOX_STATUS" }, | ||
13248 | { 520, "MEM_ERROR_ENABLE" }, | ||
13249 | { 521, "MEM_ERROR_MBOX_ADDR" }, | ||
13250 | { 522, "MEM_ERROR_MBOX_STATUS" }, | ||
13251 | { 523, "SNIC_ERROR_LOG_STATUS" }, | ||
13252 | { 524, "SNIC_ERROR_LOG_VA" }, | ||
13253 | { 525, "XDN_DEMUX_ERROR" }, | ||
13254 | { 1024, "MPL_ILL_SET_0" }, | ||
13255 | { 1025, "MPL_ILL_SET_1" }, | ||
13256 | { 1026, "MPL_ILL_SET_2" }, | ||
13257 | { 1027, "MPL_ILL_SET_3" }, | ||
13258 | { 1028, "MPL_ILL" }, | ||
13259 | { 1536, "MPL_GPV_SET_0" }, | ||
13260 | { 1537, "MPL_GPV_SET_1" }, | ||
13261 | { 1538, "MPL_GPV_SET_2" }, | ||
13262 | { 1539, "MPL_GPV_SET_3" }, | ||
13263 | { 1540, "MPL_GPV" }, | ||
13264 | { 1541, "GPV_REASON" }, | ||
13265 | { 2048, "MPL_SN_ACCESS_SET_0" }, | ||
13266 | { 2049, "MPL_SN_ACCESS_SET_1" }, | ||
13267 | { 2050, "MPL_SN_ACCESS_SET_2" }, | ||
13268 | { 2051, "MPL_SN_ACCESS_SET_3" }, | ||
13269 | { 2052, "MPL_SN_ACCESS" }, | ||
13270 | { 2053, "SNCTL" }, | ||
13271 | { 2054, "SNFIFO_DATA" }, | ||
13272 | { 2055, "SNFIFO_SEL" }, | ||
13273 | { 2056, "SNIC_INVADDR" }, | ||
13274 | { 2057, "SNISTATE" }, | ||
13275 | { 2058, "SNOSTATE" }, | ||
13276 | { 2059, "SNPC" }, | ||
13277 | { 2060, "SNSTATIC" }, | ||
13278 | { 2304, "SN_DATA_AVAIL" }, | ||
13279 | { 2560, "MPL_IDN_ACCESS_SET_0" }, | ||
13280 | { 2561, "MPL_IDN_ACCESS_SET_1" }, | ||
13281 | { 2562, "MPL_IDN_ACCESS_SET_2" }, | ||
13282 | { 2563, "MPL_IDN_ACCESS_SET_3" }, | ||
13283 | { 2564, "MPL_IDN_ACCESS" }, | ||
13284 | { 2565, "IDN_DEMUX_CA_COUNT" }, | ||
13285 | { 2566, "IDN_DEMUX_COUNT_0" }, | ||
13286 | { 2567, "IDN_DEMUX_COUNT_1" }, | ||
13287 | { 2568, "IDN_DEMUX_CTL" }, | ||
13288 | { 2569, "IDN_DEMUX_CURR_TAG" }, | ||
13289 | { 2570, "IDN_DEMUX_QUEUE_SEL" }, | ||
13290 | { 2571, "IDN_DEMUX_STATUS" }, | ||
13291 | { 2572, "IDN_DEMUX_WRITE_FIFO" }, | ||
13292 | { 2573, "IDN_DEMUX_WRITE_QUEUE" }, | ||
13293 | { 2574, "IDN_PENDING" }, | ||
13294 | { 2575, "IDN_SP_FIFO_DATA" }, | ||
13295 | { 2576, "IDN_SP_FIFO_SEL" }, | ||
13296 | { 2577, "IDN_SP_FREEZE" }, | ||
13297 | { 2578, "IDN_SP_STATE" }, | ||
13298 | { 2579, "IDN_TAG_0" }, | ||
13299 | { 2580, "IDN_TAG_1" }, | ||
13300 | { 2581, "IDN_TAG_VALID" }, | ||
13301 | { 2582, "IDN_TILE_COORD" }, | ||
13302 | { 2816, "IDN_CA_DATA" }, | ||
13303 | { 2817, "IDN_CA_REM" }, | ||
13304 | { 2818, "IDN_CA_TAG" }, | ||
13305 | { 2819, "IDN_DATA_AVAIL" }, | ||
13306 | { 3072, "MPL_UDN_ACCESS_SET_0" }, | ||
13307 | { 3073, "MPL_UDN_ACCESS_SET_1" }, | ||
13308 | { 3074, "MPL_UDN_ACCESS_SET_2" }, | ||
13309 | { 3075, "MPL_UDN_ACCESS_SET_3" }, | ||
13310 | { 3076, "MPL_UDN_ACCESS" }, | ||
13311 | { 3077, "UDN_DEMUX_CA_COUNT" }, | ||
13312 | { 3078, "UDN_DEMUX_COUNT_0" }, | ||
13313 | { 3079, "UDN_DEMUX_COUNT_1" }, | ||
13314 | { 3080, "UDN_DEMUX_COUNT_2" }, | ||
13315 | { 3081, "UDN_DEMUX_COUNT_3" }, | ||
13316 | { 3082, "UDN_DEMUX_CTL" }, | ||
13317 | { 3083, "UDN_DEMUX_CURR_TAG" }, | ||
13318 | { 3084, "UDN_DEMUX_QUEUE_SEL" }, | ||
13319 | { 3085, "UDN_DEMUX_STATUS" }, | ||
13320 | { 3086, "UDN_DEMUX_WRITE_FIFO" }, | ||
13321 | { 3087, "UDN_DEMUX_WRITE_QUEUE" }, | ||
13322 | { 3088, "UDN_PENDING" }, | ||
13323 | { 3089, "UDN_SP_FIFO_DATA" }, | ||
13324 | { 3090, "UDN_SP_FIFO_SEL" }, | ||
13325 | { 3091, "UDN_SP_FREEZE" }, | ||
13326 | { 3092, "UDN_SP_STATE" }, | ||
13327 | { 3093, "UDN_TAG_0" }, | ||
13328 | { 3094, "UDN_TAG_1" }, | ||
13329 | { 3095, "UDN_TAG_2" }, | ||
13330 | { 3096, "UDN_TAG_3" }, | ||
13331 | { 3097, "UDN_TAG_VALID" }, | ||
13332 | { 3098, "UDN_TILE_COORD" }, | ||
13333 | { 3328, "UDN_CA_DATA" }, | ||
13334 | { 3329, "UDN_CA_REM" }, | ||
13335 | { 3330, "UDN_CA_TAG" }, | ||
13336 | { 3331, "UDN_DATA_AVAIL" }, | ||
13337 | { 3584, "MPL_IDN_REFILL_SET_0" }, | ||
13338 | { 3585, "MPL_IDN_REFILL_SET_1" }, | ||
13339 | { 3586, "MPL_IDN_REFILL_SET_2" }, | ||
13340 | { 3587, "MPL_IDN_REFILL_SET_3" }, | ||
13341 | { 3588, "MPL_IDN_REFILL" }, | ||
13342 | { 3589, "IDN_REFILL_EN" }, | ||
13343 | { 4096, "MPL_UDN_REFILL_SET_0" }, | ||
13344 | { 4097, "MPL_UDN_REFILL_SET_1" }, | ||
13345 | { 4098, "MPL_UDN_REFILL_SET_2" }, | ||
13346 | { 4099, "MPL_UDN_REFILL_SET_3" }, | ||
13347 | { 4100, "MPL_UDN_REFILL" }, | ||
13348 | { 4101, "UDN_REFILL_EN" }, | ||
13349 | { 4608, "MPL_IDN_COMPLETE_SET_0" }, | ||
13350 | { 4609, "MPL_IDN_COMPLETE_SET_1" }, | ||
13351 | { 4610, "MPL_IDN_COMPLETE_SET_2" }, | ||
13352 | { 4611, "MPL_IDN_COMPLETE_SET_3" }, | ||
13353 | { 4612, "MPL_IDN_COMPLETE" }, | ||
13354 | { 4613, "IDN_REMAINING" }, | ||
13355 | { 5120, "MPL_UDN_COMPLETE_SET_0" }, | ||
13356 | { 5121, "MPL_UDN_COMPLETE_SET_1" }, | ||
13357 | { 5122, "MPL_UDN_COMPLETE_SET_2" }, | ||
13358 | { 5123, "MPL_UDN_COMPLETE_SET_3" }, | ||
13359 | { 5124, "MPL_UDN_COMPLETE" }, | ||
13360 | { 5125, "UDN_REMAINING" }, | ||
13361 | { 5632, "MPL_SWINT_3_SET_0" }, | ||
13362 | { 5633, "MPL_SWINT_3_SET_1" }, | ||
13363 | { 5634, "MPL_SWINT_3_SET_2" }, | ||
13364 | { 5635, "MPL_SWINT_3_SET_3" }, | ||
13365 | { 5636, "MPL_SWINT_3" }, | ||
13366 | { 6144, "MPL_SWINT_2_SET_0" }, | ||
13367 | { 6145, "MPL_SWINT_2_SET_1" }, | ||
13368 | { 6146, "MPL_SWINT_2_SET_2" }, | ||
13369 | { 6147, "MPL_SWINT_2_SET_3" }, | ||
13370 | { 6148, "MPL_SWINT_2" }, | ||
13371 | { 6656, "MPL_SWINT_1_SET_0" }, | ||
13372 | { 6657, "MPL_SWINT_1_SET_1" }, | ||
13373 | { 6658, "MPL_SWINT_1_SET_2" }, | ||
13374 | { 6659, "MPL_SWINT_1_SET_3" }, | ||
13375 | { 6660, "MPL_SWINT_1" }, | ||
13376 | { 7168, "MPL_SWINT_0_SET_0" }, | ||
13377 | { 7169, "MPL_SWINT_0_SET_1" }, | ||
13378 | { 7170, "MPL_SWINT_0_SET_2" }, | ||
13379 | { 7171, "MPL_SWINT_0_SET_3" }, | ||
13380 | { 7172, "MPL_SWINT_0" }, | ||
13381 | { 7680, "MPL_UNALIGN_DATA_SET_0" }, | ||
13382 | { 7681, "MPL_UNALIGN_DATA_SET_1" }, | ||
13383 | { 7682, "MPL_UNALIGN_DATA_SET_2" }, | ||
13384 | { 7683, "MPL_UNALIGN_DATA_SET_3" }, | ||
13385 | { 7684, "MPL_UNALIGN_DATA" }, | ||
13386 | { 8192, "MPL_DTLB_MISS_SET_0" }, | ||
13387 | { 8193, "MPL_DTLB_MISS_SET_1" }, | ||
13388 | { 8194, "MPL_DTLB_MISS_SET_2" }, | ||
13389 | { 8195, "MPL_DTLB_MISS_SET_3" }, | ||
13390 | { 8196, "MPL_DTLB_MISS" }, | ||
13391 | { 8448, "AER_0" }, | ||
13392 | { 8449, "AER_1" }, | ||
13393 | { 8450, "DTLB_BAD_ADDR" }, | ||
13394 | { 8451, "DTLB_BAD_ADDR_REASON" }, | ||
13395 | { 8452, "DTLB_CURRENT_0" }, | ||
13396 | { 8453, "DTLB_CURRENT_1" }, | ||
13397 | { 8454, "DTLB_CURRENT_2" }, | ||
13398 | { 8455, "DTLB_CURRENT_3" }, | ||
13399 | { 8456, "DTLB_INDEX" }, | ||
13400 | { 8457, "DTLB_MATCH_0" }, | ||
13401 | { 8458, "NUMBER_DTLB" }, | ||
13402 | { 8459, "PHYSICAL_MEMORY_MODE" }, | ||
13403 | { 8460, "REPLACEMENT_DTLB" }, | ||
13404 | { 8461, "WIRED_DTLB" }, | ||
13405 | { 8462, "CACHE_RED_WAY_OVERRIDDEN" }, | ||
13406 | { 8463, "DTLB_PERF" }, | ||
13407 | { 8704, "MPL_DTLB_ACCESS_SET_0" }, | ||
13408 | { 8705, "MPL_DTLB_ACCESS_SET_1" }, | ||
13409 | { 8706, "MPL_DTLB_ACCESS_SET_2" }, | ||
13410 | { 8707, "MPL_DTLB_ACCESS_SET_3" }, | ||
13411 | { 8708, "MPL_DTLB_ACCESS" }, | ||
13412 | { 9216, "MPL_DMATLB_MISS_SET_0" }, | ||
13413 | { 9217, "MPL_DMATLB_MISS_SET_1" }, | ||
13414 | { 9218, "MPL_DMATLB_MISS_SET_2" }, | ||
13415 | { 9219, "MPL_DMATLB_MISS_SET_3" }, | ||
13416 | { 9220, "MPL_DMATLB_MISS" }, | ||
13417 | { 9472, "DMA_BAD_ADDR" }, | ||
13418 | { 9473, "DMA_STATUS" }, | ||
13419 | { 9728, "MPL_DMATLB_ACCESS_SET_0" }, | ||
13420 | { 9729, "MPL_DMATLB_ACCESS_SET_1" }, | ||
13421 | { 9730, "MPL_DMATLB_ACCESS_SET_2" }, | ||
13422 | { 9731, "MPL_DMATLB_ACCESS_SET_3" }, | ||
13423 | { 9732, "MPL_DMATLB_ACCESS" }, | ||
13424 | { 10240, "MPL_SNITLB_MISS_SET_0" }, | ||
13425 | { 10241, "MPL_SNITLB_MISS_SET_1" }, | ||
13426 | { 10242, "MPL_SNITLB_MISS_SET_2" }, | ||
13427 | { 10243, "MPL_SNITLB_MISS_SET_3" }, | ||
13428 | { 10244, "MPL_SNITLB_MISS" }, | ||
13429 | { 10245, "NUMBER_SNITLB" }, | ||
13430 | { 10246, "REPLACEMENT_SNITLB" }, | ||
13431 | { 10247, "SNITLB_CURRENT_0" }, | ||
13432 | { 10248, "SNITLB_CURRENT_1" }, | ||
13433 | { 10249, "SNITLB_CURRENT_2" }, | ||
13434 | { 10250, "SNITLB_CURRENT_3" }, | ||
13435 | { 10251, "SNITLB_INDEX" }, | ||
13436 | { 10252, "SNITLB_MATCH_0" }, | ||
13437 | { 10253, "SNITLB_PR" }, | ||
13438 | { 10254, "WIRED_SNITLB" }, | ||
13439 | { 10255, "SNITLB_STATUS" }, | ||
13440 | { 10752, "MPL_SN_NOTIFY_SET_0" }, | ||
13441 | { 10753, "MPL_SN_NOTIFY_SET_1" }, | ||
13442 | { 10754, "MPL_SN_NOTIFY_SET_2" }, | ||
13443 | { 10755, "MPL_SN_NOTIFY_SET_3" }, | ||
13444 | { 10756, "MPL_SN_NOTIFY" }, | ||
13445 | { 10757, "SN_NOTIFY_STATUS" }, | ||
13446 | { 11264, "MPL_SN_FIREWALL_SET_0" }, | ||
13447 | { 11265, "MPL_SN_FIREWALL_SET_1" }, | ||
13448 | { 11266, "MPL_SN_FIREWALL_SET_2" }, | ||
13449 | { 11267, "MPL_SN_FIREWALL_SET_3" }, | ||
13450 | { 11268, "MPL_SN_FIREWALL" }, | ||
13451 | { 11269, "SN_DIRECTION_PROTECT" }, | ||
13452 | { 11776, "MPL_IDN_FIREWALL_SET_0" }, | ||
13453 | { 11777, "MPL_IDN_FIREWALL_SET_1" }, | ||
13454 | { 11778, "MPL_IDN_FIREWALL_SET_2" }, | ||
13455 | { 11779, "MPL_IDN_FIREWALL_SET_3" }, | ||
13456 | { 11780, "MPL_IDN_FIREWALL" }, | ||
13457 | { 11781, "IDN_DIRECTION_PROTECT" }, | ||
13458 | { 12288, "MPL_UDN_FIREWALL_SET_0" }, | ||
13459 | { 12289, "MPL_UDN_FIREWALL_SET_1" }, | ||
13460 | { 12290, "MPL_UDN_FIREWALL_SET_2" }, | ||
13461 | { 12291, "MPL_UDN_FIREWALL_SET_3" }, | ||
13462 | { 12292, "MPL_UDN_FIREWALL" }, | ||
13463 | { 12293, "UDN_DIRECTION_PROTECT" }, | ||
13464 | { 12800, "MPL_TILE_TIMER_SET_0" }, | ||
13465 | { 12801, "MPL_TILE_TIMER_SET_1" }, | ||
13466 | { 12802, "MPL_TILE_TIMER_SET_2" }, | ||
13467 | { 12803, "MPL_TILE_TIMER_SET_3" }, | ||
13468 | { 12804, "MPL_TILE_TIMER" }, | ||
13469 | { 12805, "TILE_TIMER_CONTROL" }, | ||
13470 | { 13312, "MPL_IDN_TIMER_SET_0" }, | ||
13471 | { 13313, "MPL_IDN_TIMER_SET_1" }, | ||
13472 | { 13314, "MPL_IDN_TIMER_SET_2" }, | ||
13473 | { 13315, "MPL_IDN_TIMER_SET_3" }, | ||
13474 | { 13316, "MPL_IDN_TIMER" }, | ||
13475 | { 13317, "IDN_DEADLOCK_COUNT" }, | ||
13476 | { 13318, "IDN_DEADLOCK_TIMEOUT" }, | ||
13477 | { 13824, "MPL_UDN_TIMER_SET_0" }, | ||
13478 | { 13825, "MPL_UDN_TIMER_SET_1" }, | ||
13479 | { 13826, "MPL_UDN_TIMER_SET_2" }, | ||
13480 | { 13827, "MPL_UDN_TIMER_SET_3" }, | ||
13481 | { 13828, "MPL_UDN_TIMER" }, | ||
13482 | { 13829, "UDN_DEADLOCK_COUNT" }, | ||
13483 | { 13830, "UDN_DEADLOCK_TIMEOUT" }, | ||
13484 | { 14336, "MPL_DMA_NOTIFY_SET_0" }, | ||
13485 | { 14337, "MPL_DMA_NOTIFY_SET_1" }, | ||
13486 | { 14338, "MPL_DMA_NOTIFY_SET_2" }, | ||
13487 | { 14339, "MPL_DMA_NOTIFY_SET_3" }, | ||
13488 | { 14340, "MPL_DMA_NOTIFY" }, | ||
13489 | { 14592, "DMA_BYTE" }, | ||
13490 | { 14593, "DMA_CHUNK_SIZE" }, | ||
13491 | { 14594, "DMA_CTR" }, | ||
13492 | { 14595, "DMA_DST_ADDR" }, | ||
13493 | { 14596, "DMA_DST_CHUNK_ADDR" }, | ||
13494 | { 14597, "DMA_SRC_ADDR" }, | ||
13495 | { 14598, "DMA_SRC_CHUNK_ADDR" }, | ||
13496 | { 14599, "DMA_STRIDE" }, | ||
13497 | { 14600, "DMA_USER_STATUS" }, | ||
13498 | { 14848, "MPL_IDN_CA_SET_0" }, | ||
13499 | { 14849, "MPL_IDN_CA_SET_1" }, | ||
13500 | { 14850, "MPL_IDN_CA_SET_2" }, | ||
13501 | { 14851, "MPL_IDN_CA_SET_3" }, | ||
13502 | { 14852, "MPL_IDN_CA" }, | ||
13503 | { 15360, "MPL_UDN_CA_SET_0" }, | ||
13504 | { 15361, "MPL_UDN_CA_SET_1" }, | ||
13505 | { 15362, "MPL_UDN_CA_SET_2" }, | ||
13506 | { 15363, "MPL_UDN_CA_SET_3" }, | ||
13507 | { 15364, "MPL_UDN_CA" }, | ||
13508 | { 15872, "MPL_IDN_AVAIL_SET_0" }, | ||
13509 | { 15873, "MPL_IDN_AVAIL_SET_1" }, | ||
13510 | { 15874, "MPL_IDN_AVAIL_SET_2" }, | ||
13511 | { 15875, "MPL_IDN_AVAIL_SET_3" }, | ||
13512 | { 15876, "MPL_IDN_AVAIL" }, | ||
13513 | { 15877, "IDN_AVAIL_EN" }, | ||
13514 | { 16384, "MPL_UDN_AVAIL_SET_0" }, | ||
13515 | { 16385, "MPL_UDN_AVAIL_SET_1" }, | ||
13516 | { 16386, "MPL_UDN_AVAIL_SET_2" }, | ||
13517 | { 16387, "MPL_UDN_AVAIL_SET_3" }, | ||
13518 | { 16388, "MPL_UDN_AVAIL" }, | ||
13519 | { 16389, "UDN_AVAIL_EN" }, | ||
13520 | { 16896, "MPL_PERF_COUNT_SET_0" }, | ||
13521 | { 16897, "MPL_PERF_COUNT_SET_1" }, | ||
13522 | { 16898, "MPL_PERF_COUNT_SET_2" }, | ||
13523 | { 16899, "MPL_PERF_COUNT_SET_3" }, | ||
13524 | { 16900, "MPL_PERF_COUNT" }, | ||
13525 | { 16901, "PERF_COUNT_0" }, | ||
13526 | { 16902, "PERF_COUNT_1" }, | ||
13527 | { 16903, "PERF_COUNT_CTL" }, | ||
13528 | { 16904, "PERF_COUNT_STS" }, | ||
13529 | { 16905, "WATCH_CTL" }, | ||
13530 | { 16906, "WATCH_MASK" }, | ||
13531 | { 16907, "WATCH_VAL" }, | ||
13532 | { 16912, "PERF_COUNT_DN_CTL" }, | ||
13533 | { 17408, "MPL_INTCTRL_3_SET_0" }, | ||
13534 | { 17409, "MPL_INTCTRL_3_SET_1" }, | ||
13535 | { 17410, "MPL_INTCTRL_3_SET_2" }, | ||
13536 | { 17411, "MPL_INTCTRL_3_SET_3" }, | ||
13537 | { 17412, "MPL_INTCTRL_3" }, | ||
13538 | { 17413, "EX_CONTEXT_3_0" }, | ||
13539 | { 17414, "EX_CONTEXT_3_1" }, | ||
13540 | { 17415, "INTERRUPT_MASK_3_0" }, | ||
13541 | { 17416, "INTERRUPT_MASK_3_1" }, | ||
13542 | { 17417, "INTERRUPT_MASK_RESET_3_0" }, | ||
13543 | { 17418, "INTERRUPT_MASK_RESET_3_1" }, | ||
13544 | { 17419, "INTERRUPT_MASK_SET_3_0" }, | ||
13545 | { 17420, "INTERRUPT_MASK_SET_3_1" }, | ||
13546 | { 17432, "INTCTRL_3_STATUS" }, | ||
13547 | { 17664, "SYSTEM_SAVE_3_0" }, | ||
13548 | { 17665, "SYSTEM_SAVE_3_1" }, | ||
13549 | { 17666, "SYSTEM_SAVE_3_2" }, | ||
13550 | { 17667, "SYSTEM_SAVE_3_3" }, | ||
13551 | { 17920, "MPL_INTCTRL_2_SET_0" }, | ||
13552 | { 17921, "MPL_INTCTRL_2_SET_1" }, | ||
13553 | { 17922, "MPL_INTCTRL_2_SET_2" }, | ||
13554 | { 17923, "MPL_INTCTRL_2_SET_3" }, | ||
13555 | { 17924, "MPL_INTCTRL_2" }, | ||
13556 | { 17925, "EX_CONTEXT_2_0" }, | ||
13557 | { 17926, "EX_CONTEXT_2_1" }, | ||
13558 | { 17927, "INTCTRL_2_STATUS" }, | ||
13559 | { 17928, "INTERRUPT_MASK_2_0" }, | ||
13560 | { 17929, "INTERRUPT_MASK_2_1" }, | ||
13561 | { 17930, "INTERRUPT_MASK_RESET_2_0" }, | ||
13562 | { 17931, "INTERRUPT_MASK_RESET_2_1" }, | ||
13563 | { 17932, "INTERRUPT_MASK_SET_2_0" }, | ||
13564 | { 17933, "INTERRUPT_MASK_SET_2_1" }, | ||
13565 | { 18176, "SYSTEM_SAVE_2_0" }, | ||
13566 | { 18177, "SYSTEM_SAVE_2_1" }, | ||
13567 | { 18178, "SYSTEM_SAVE_2_2" }, | ||
13568 | { 18179, "SYSTEM_SAVE_2_3" }, | ||
13569 | { 18432, "MPL_INTCTRL_1_SET_0" }, | ||
13570 | { 18433, "MPL_INTCTRL_1_SET_1" }, | ||
13571 | { 18434, "MPL_INTCTRL_1_SET_2" }, | ||
13572 | { 18435, "MPL_INTCTRL_1_SET_3" }, | ||
13573 | { 18436, "MPL_INTCTRL_1" }, | ||
13574 | { 18437, "EX_CONTEXT_1_0" }, | ||
13575 | { 18438, "EX_CONTEXT_1_1" }, | ||
13576 | { 18439, "INTCTRL_1_STATUS" }, | ||
13577 | { 18440, "INTCTRL_3_STATUS_REV0" }, | ||
13578 | { 18441, "INTERRUPT_MASK_1_0" }, | ||
13579 | { 18442, "INTERRUPT_MASK_1_1" }, | ||
13580 | { 18443, "INTERRUPT_MASK_RESET_1_0" }, | ||
13581 | { 18444, "INTERRUPT_MASK_RESET_1_1" }, | ||
13582 | { 18445, "INTERRUPT_MASK_SET_1_0" }, | ||
13583 | { 18446, "INTERRUPT_MASK_SET_1_1" }, | ||
13584 | { 18688, "SYSTEM_SAVE_1_0" }, | ||
13585 | { 18689, "SYSTEM_SAVE_1_1" }, | ||
13586 | { 18690, "SYSTEM_SAVE_1_2" }, | ||
13587 | { 18691, "SYSTEM_SAVE_1_3" }, | ||
13588 | { 18944, "MPL_INTCTRL_0_SET_0" }, | ||
13589 | { 18945, "MPL_INTCTRL_0_SET_1" }, | ||
13590 | { 18946, "MPL_INTCTRL_0_SET_2" }, | ||
13591 | { 18947, "MPL_INTCTRL_0_SET_3" }, | ||
13592 | { 18948, "MPL_INTCTRL_0" }, | ||
13593 | { 18949, "EX_CONTEXT_0_0" }, | ||
13594 | { 18950, "EX_CONTEXT_0_1" }, | ||
13595 | { 18951, "INTCTRL_0_STATUS" }, | ||
13596 | { 18952, "INTERRUPT_MASK_0_0" }, | ||
13597 | { 18953, "INTERRUPT_MASK_0_1" }, | ||
13598 | { 18954, "INTERRUPT_MASK_RESET_0_0" }, | ||
13599 | { 18955, "INTERRUPT_MASK_RESET_0_1" }, | ||
13600 | { 18956, "INTERRUPT_MASK_SET_0_0" }, | ||
13601 | { 18957, "INTERRUPT_MASK_SET_0_1" }, | ||
13602 | { 19200, "SYSTEM_SAVE_0_0" }, | ||
13603 | { 19201, "SYSTEM_SAVE_0_1" }, | ||
13604 | { 19202, "SYSTEM_SAVE_0_2" }, | ||
13605 | { 19203, "SYSTEM_SAVE_0_3" }, | ||
13606 | { 19456, "MPL_BOOT_ACCESS_SET_0" }, | ||
13607 | { 19457, "MPL_BOOT_ACCESS_SET_1" }, | ||
13608 | { 19458, "MPL_BOOT_ACCESS_SET_2" }, | ||
13609 | { 19459, "MPL_BOOT_ACCESS_SET_3" }, | ||
13610 | { 19460, "MPL_BOOT_ACCESS" }, | ||
13611 | { 19461, "CBOX_CACHEASRAM_CONFIG" }, | ||
13612 | { 19462, "CBOX_CACHE_CONFIG" }, | ||
13613 | { 19463, "CBOX_MMAP_0" }, | ||
13614 | { 19464, "CBOX_MMAP_1" }, | ||
13615 | { 19465, "CBOX_MMAP_2" }, | ||
13616 | { 19466, "CBOX_MMAP_3" }, | ||
13617 | { 19467, "CBOX_MSR" }, | ||
13618 | { 19468, "CBOX_SRC_ID" }, | ||
13619 | { 19469, "CYCLE_HIGH_MODIFY" }, | ||
13620 | { 19470, "CYCLE_LOW_MODIFY" }, | ||
13621 | { 19471, "DIAG_BCST_CTL" }, | ||
13622 | { 19472, "DIAG_BCST_MASK" }, | ||
13623 | { 19473, "DIAG_BCST_TRIGGER" }, | ||
13624 | { 19474, "DIAG_MUX_CTL" }, | ||
13625 | { 19475, "DIAG_TRACE_CTL" }, | ||
13626 | { 19476, "DIAG_TRACE_STS" }, | ||
13627 | { 19477, "IDN_DEMUX_BUF_THRESH" }, | ||
13628 | { 19478, "SBOX_CONFIG" }, | ||
13629 | { 19479, "TILE_COORD" }, | ||
13630 | { 19480, "UDN_DEMUX_BUF_THRESH" }, | ||
13631 | { 19481, "CBOX_HOME_MAP_ADDR" }, | ||
13632 | { 19482, "CBOX_HOME_MAP_DATA" }, | ||
13633 | { 19483, "CBOX_MSR1" }, | ||
13634 | { 19484, "BIG_ENDIAN_CONFIG" }, | ||
13635 | { 19485, "MEM_STRIPE_CONFIG" }, | ||
13636 | { 19486, "DIAG_TRACE_WAY" }, | ||
13637 | { 19487, "VDN_SNOOP_SHIM_CTL" }, | ||
13638 | { 19488, "PERF_COUNT_PLS" }, | ||
13639 | { 19489, "DIAG_TRACE_DATA" }, | ||
13640 | { 19712, "I_AER_0" }, | ||
13641 | { 19713, "I_AER_1" }, | ||
13642 | { 19714, "I_PHYSICAL_MEMORY_MODE" }, | ||
13643 | { 19968, "MPL_WORLD_ACCESS_SET_0" }, | ||
13644 | { 19969, "MPL_WORLD_ACCESS_SET_1" }, | ||
13645 | { 19970, "MPL_WORLD_ACCESS_SET_2" }, | ||
13646 | { 19971, "MPL_WORLD_ACCESS_SET_3" }, | ||
13647 | { 19972, "MPL_WORLD_ACCESS" }, | ||
13648 | { 19973, "SIM_SOCKET" }, | ||
13649 | { 19974, "CYCLE_HIGH" }, | ||
13650 | { 19975, "CYCLE_LOW" }, | ||
13651 | { 19976, "DONE" }, | ||
13652 | { 19977, "FAIL" }, | ||
13653 | { 19978, "INTERRUPT_CRITICAL_SECTION" }, | ||
13654 | { 19979, "PASS" }, | ||
13655 | { 19980, "SIM_CONTROL" }, | ||
13656 | { 19981, "EVENT_BEGIN" }, | ||
13657 | { 19982, "EVENT_END" }, | ||
13658 | { 19983, "TILE_WRITE_PENDING" }, | ||
13659 | { 19984, "TILE_RTF_HWM" }, | ||
13660 | { 20224, "PROC_STATUS" }, | ||
13661 | { 20225, "STATUS_SATURATE" }, | ||
13662 | { 20480, "MPL_I_ASID_SET_0" }, | ||
13663 | { 20481, "MPL_I_ASID_SET_1" }, | ||
13664 | { 20482, "MPL_I_ASID_SET_2" }, | ||
13665 | { 20483, "MPL_I_ASID_SET_3" }, | ||
13666 | { 20484, "MPL_I_ASID" }, | ||
13667 | { 20485, "I_ASID" }, | ||
13668 | { 20992, "MPL_D_ASID_SET_0" }, | ||
13669 | { 20993, "MPL_D_ASID_SET_1" }, | ||
13670 | { 20994, "MPL_D_ASID_SET_2" }, | ||
13671 | { 20995, "MPL_D_ASID_SET_3" }, | ||
13672 | { 20996, "MPL_D_ASID" }, | ||
13673 | { 20997, "D_ASID" }, | ||
13674 | { 21504, "MPL_DMA_ASID_SET_0" }, | ||
13675 | { 21505, "MPL_DMA_ASID_SET_1" }, | ||
13676 | { 21506, "MPL_DMA_ASID_SET_2" }, | ||
13677 | { 21507, "MPL_DMA_ASID_SET_3" }, | ||
13678 | { 21508, "MPL_DMA_ASID" }, | ||
13679 | { 21509, "DMA_ASID" }, | ||
13680 | { 22016, "MPL_SNI_ASID_SET_0" }, | ||
13681 | { 22017, "MPL_SNI_ASID_SET_1" }, | ||
13682 | { 22018, "MPL_SNI_ASID_SET_2" }, | ||
13683 | { 22019, "MPL_SNI_ASID_SET_3" }, | ||
13684 | { 22020, "MPL_SNI_ASID" }, | ||
13685 | { 22021, "SNI_ASID" }, | ||
13686 | { 22528, "MPL_DMA_CPL_SET_0" }, | ||
13687 | { 22529, "MPL_DMA_CPL_SET_1" }, | ||
13688 | { 22530, "MPL_DMA_CPL_SET_2" }, | ||
13689 | { 22531, "MPL_DMA_CPL_SET_3" }, | ||
13690 | { 22532, "MPL_DMA_CPL" }, | ||
13691 | { 23040, "MPL_SN_CPL_SET_0" }, | ||
13692 | { 23041, "MPL_SN_CPL_SET_1" }, | ||
13693 | { 23042, "MPL_SN_CPL_SET_2" }, | ||
13694 | { 23043, "MPL_SN_CPL_SET_3" }, | ||
13695 | { 23044, "MPL_SN_CPL" }, | ||
13696 | { 23552, "MPL_DOUBLE_FAULT_SET_0" }, | ||
13697 | { 23553, "MPL_DOUBLE_FAULT_SET_1" }, | ||
13698 | { 23554, "MPL_DOUBLE_FAULT_SET_2" }, | ||
13699 | { 23555, "MPL_DOUBLE_FAULT_SET_3" }, | ||
13700 | { 23556, "MPL_DOUBLE_FAULT" }, | ||
13701 | { 23557, "LAST_INTERRUPT_REASON" }, | ||
13702 | { 24064, "MPL_SN_STATIC_ACCESS_SET_0" }, | ||
13703 | { 24065, "MPL_SN_STATIC_ACCESS_SET_1" }, | ||
13704 | { 24066, "MPL_SN_STATIC_ACCESS_SET_2" }, | ||
13705 | { 24067, "MPL_SN_STATIC_ACCESS_SET_3" }, | ||
13706 | { 24068, "MPL_SN_STATIC_ACCESS" }, | ||
13707 | { 24069, "SN_STATIC_CTL" }, | ||
13708 | { 24070, "SN_STATIC_FIFO_DATA" }, | ||
13709 | { 24071, "SN_STATIC_FIFO_SEL" }, | ||
13710 | { 24073, "SN_STATIC_ISTATE" }, | ||
13711 | { 24074, "SN_STATIC_OSTATE" }, | ||
13712 | { 24076, "SN_STATIC_STATIC" }, | ||
13713 | { 24320, "SN_STATIC_DATA_AVAIL" }, | ||
13714 | { 24576, "MPL_AUX_PERF_COUNT_SET_0" }, | ||
13715 | { 24577, "MPL_AUX_PERF_COUNT_SET_1" }, | ||
13716 | { 24578, "MPL_AUX_PERF_COUNT_SET_2" }, | ||
13717 | { 24579, "MPL_AUX_PERF_COUNT_SET_3" }, | ||
13718 | { 24580, "MPL_AUX_PERF_COUNT" }, | ||
13719 | { 24581, "AUX_PERF_COUNT_0" }, | ||
13720 | { 24582, "AUX_PERF_COUNT_1" }, | ||
13721 | { 24583, "AUX_PERF_COUNT_CTL" }, | ||
13722 | { 24584, "AUX_PERF_COUNT_STS" }, | ||
13723 | }; | ||
13724 | |||
13725 | const int tile_num_sprs = 499; | ||
13726 | |||
13727 | |||
13728 | |||
13729 | |||
13730 | /* Canonical name of each register. */ | ||
13731 | const char *const tile_register_names[] = | ||
13732 | { | ||
13733 | "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", | ||
13734 | "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", | ||
13735 | "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", | ||
13736 | "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", | ||
13737 | "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39", | ||
13738 | "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", | ||
13739 | "r48", "r49", "r50", "r51", "r52", "tp", "sp", "lr", | ||
13740 | "sn", "idn0", "idn1", "udn0", "udn1", "udn2", "udn3", "zero" | ||
13741 | }; | ||
13742 | |||
13743 | |||
13744 | /* Given a set of bundle bits and the lookup FSM for a specific pipe, | ||
13745 | * returns which instruction the bundle contains in that pipe. | ||
13746 | */ | ||
13747 | static const struct tile_opcode * | ||
13748 | find_opcode(tile_bundle_bits bits, const unsigned short *table) | ||
13749 | { | ||
13750 | int index = 0; | ||
13751 | |||
13752 | while (1) | ||
13753 | { | ||
13754 | unsigned short bitspec = table[index]; | ||
13755 | unsigned int bitfield = | ||
13756 | ((unsigned int)(bits >> (bitspec & 63))) & (bitspec >> 6); | ||
13757 | |||
13758 | unsigned short next = table[index + 1 + bitfield]; | ||
13759 | if (next <= TILE_OPC_NONE) | ||
13760 | return &tile_opcodes[next]; | ||
13761 | |||
13762 | index = next - TILE_OPC_NONE; | ||
13763 | } | ||
13764 | } | ||
13765 | |||
13766 | |||
13767 | int | ||
13768 | parse_insn_tile(tile_bundle_bits bits, | ||
13769 | unsigned int pc, | ||
13770 | struct tile_decoded_instruction | ||
13771 | decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]) | ||
13772 | { | ||
13773 | int num_instructions = 0; | ||
13774 | int pipe; | ||
13775 | |||
13776 | int min_pipe, max_pipe; | ||
13777 | if ((bits & TILE_BUNDLE_Y_ENCODING_MASK) == 0) | ||
13778 | { | ||
13779 | min_pipe = TILE_PIPELINE_X0; | ||
13780 | max_pipe = TILE_PIPELINE_X1; | ||
13781 | } | ||
13782 | else | ||
13783 | { | ||
13784 | min_pipe = TILE_PIPELINE_Y0; | ||
13785 | max_pipe = TILE_PIPELINE_Y2; | ||
13786 | } | ||
13787 | |||
13788 | /* For each pipe, find an instruction that fits. */ | ||
13789 | for (pipe = min_pipe; pipe <= max_pipe; pipe++) | ||
13790 | { | ||
13791 | const struct tile_opcode *opc; | ||
13792 | struct tile_decoded_instruction *d; | ||
13793 | int i; | ||
13794 | |||
13795 | d = &decoded[num_instructions++]; | ||
13796 | opc = find_opcode (bits, tile_bundle_decoder_fsms[pipe]); | ||
13797 | d->opcode = opc; | ||
13798 | |||
13799 | /* Decode each operand, sign extending, etc. as appropriate. */ | ||
13800 | for (i = 0; i < opc->num_operands; i++) | ||
13801 | { | ||
13802 | const struct tile_operand *op = | ||
13803 | &tile_operands[opc->operands[pipe][i]]; | ||
13804 | int opval = op->extract (bits); | ||
13805 | if (op->is_signed) | ||
13806 | { | ||
13807 | /* Sign-extend the operand. */ | ||
13808 | int shift = (int)((sizeof(int) * 8) - op->num_bits); | ||
13809 | opval = (opval << shift) >> shift; | ||
13810 | } | ||
13811 | |||
13812 | /* Adjust PC-relative scaled branch offsets. */ | ||
13813 | if (op->type == TILE_OP_TYPE_ADDRESS) | ||
13814 | { | ||
13815 | opval *= TILE_BUNDLE_SIZE_IN_BYTES; | ||
13816 | opval += (int)pc; | ||
13817 | } | ||
13818 | |||
13819 | /* Record the final value. */ | ||
13820 | d->operands[i] = op; | ||
13821 | d->operand_values[i] = opval; | ||
13822 | } | ||
13823 | } | ||
13824 | |||
13825 | return num_instructions; | ||
13826 | } | ||
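The operand decoder above sign-extends an N-bit immediate by shifting it to the top of a signed int and arithmetically shifting it back down, then rescales PC-relative branch offsets by the bundle size. A minimal standalone sketch of the sign-extension idiom (not part of the patch; like the kernel code it assumes gcc's arithmetic right shift on signed int):

    #include <stdio.h>

    /* Sign-extend the low num_bits of "field", as parse_insn_tile() does. */
    static int sign_extend(unsigned int field, int num_bits)
    {
            int shift = (int)(sizeof(int) * 8) - num_bits;
            return ((int)(field << shift)) >> shift;
    }

    int main(void)
    {
            printf("%d\n", sign_extend(0xf6, 8));  /* an 8-bit 0xf6 decodes as -10 */
            return 0;
    }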
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c new file mode 100644 index 000000000000..47500a324e32 --- /dev/null +++ b/arch/tile/kernel/time.c | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Support the cycle counter clocksource and tile timer clock event device. | ||
15 | */ | ||
16 | |||
17 | #include <linux/time.h> | ||
18 | #include <linux/timex.h> | ||
19 | #include <linux/clocksource.h> | ||
20 | #include <linux/clockchips.h> | ||
21 | #include <linux/hardirq.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <asm/irq_regs.h> | ||
26 | #include <hv/hypervisor.h> | ||
27 | #include <arch/interrupts.h> | ||
28 | #include <arch/spr_def.h> | ||
29 | |||
30 | |||
31 | /* | ||
32 | * Define the cycle counter clock source. | ||
33 | */ | ||
34 | |||
35 | /* How many cycles per second we are running at. */ | ||
36 | static cycles_t cycles_per_sec __write_once; | ||
37 | |||
38 | /* | ||
39 | * We set up shift and multiply values with a minsec of five seconds, | ||
40 | * since our timer counter counts down 31 bits at a frequency of | ||
41 | * no less than 500 MHz. See @minsec for clocks_calc_mult_shift(). | ||
42 | * We could use a different value for the 64-bit free-running | ||
43 | * cycle counter, but we use the same one for consistency, and since | ||
44 | * we will be reasonably precise with this value anyway. | ||
45 | */ | ||
46 | #define TILE_MINSEC 5 | ||
47 | |||
48 | cycles_t get_clock_rate() | ||
49 | { | ||
50 | return cycles_per_sec; | ||
51 | } | ||
52 | |||
53 | #if CHIP_HAS_SPLIT_CYCLE() | ||
54 | cycles_t get_cycles() | ||
55 | { | ||
56 | unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH); | ||
57 | unsigned int low = __insn_mfspr(SPR_CYCLE_LOW); | ||
58 | unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH); | ||
59 | |||
60 | while (unlikely(high != high2)) { | ||
61 | low = __insn_mfspr(SPR_CYCLE_LOW); | ||
62 | high = high2; | ||
63 | high2 = __insn_mfspr(SPR_CYCLE_HIGH); | ||
64 | } | ||
65 | |||
66 | return (((cycles_t)high) << 32) | low; | ||
67 | } | ||
68 | #endif | ||
69 | |||
70 | cycles_t clocksource_get_cycles(struct clocksource *cs) | ||
71 | { | ||
72 | return get_cycles(); | ||
73 | } | ||
74 | |||
75 | static struct clocksource cycle_counter_cs = { | ||
76 | .name = "cycle counter", | ||
77 | .rating = 300, | ||
78 | .read = clocksource_get_cycles, | ||
79 | .mask = CLOCKSOURCE_MASK(64), | ||
80 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
81 | }; | ||
82 | |||
83 | /* | ||
84 | * Called very early from setup_arch() to set cycles_per_sec. | ||
85 | * We initialize it early so we can use it to set up loops_per_jiffy. | ||
86 | */ | ||
87 | void __init setup_clock(void) | ||
88 | { | ||
89 | cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED); | ||
90 | clocksource_calc_mult_shift(&cycle_counter_cs, cycles_per_sec, | ||
91 | TILE_MINSEC); | ||
92 | } | ||
93 | |||
94 | void __init calibrate_delay(void) | ||
95 | { | ||
96 | loops_per_jiffy = get_clock_rate() / HZ; | ||
97 | pr_info("Clock rate yields %lu.%02lu BogoMIPS (lpj=%lu)\n", | ||
98 | loops_per_jiffy/(500000/HZ), | ||
99 | (loops_per_jiffy/(5000/HZ)) % 100, loops_per_jiffy); | ||
100 | } | ||
101 | |||
102 | /* Called fairly late in init/main.c, but before we go smp. */ | ||
103 | void __init time_init(void) | ||
104 | { | ||
105 | /* Initialize and register the clock source. */ | ||
106 | clocksource_register(&cycle_counter_cs); | ||
107 | |||
108 | /* Start up the tile-timer interrupt source on the boot cpu. */ | ||
109 | setup_tile_timer(); | ||
110 | } | ||
111 | |||
112 | |||
113 | /* | ||
114 | * Define the tile timer clock event device. The timer is driven by | ||
115 | * the TILE_TIMER_CONTROL register, which consists of a 31-bit down | ||
116 | * counter, plus bit 31, which signifies that the counter has wrapped | ||
117 | * from zero to (2**31) - 1. The INT_TILE_TIMER interrupt will be | ||
118 | * raised as long as bit 31 is set. | ||
119 | */ | ||
120 | |||
121 | #define MAX_TICK 0x7fffffff /* we have 31 bits of countdown timer */ | ||
122 | |||
123 | static int tile_timer_set_next_event(unsigned long ticks, | ||
124 | struct clock_event_device *evt) | ||
125 | { | ||
126 | BUG_ON(ticks > MAX_TICK); | ||
127 | __insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks); | ||
128 | raw_local_irq_unmask_now(INT_TILE_TIMER); | ||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | /* | ||
133 | * Whenever anyone tries to change modes, we just mask interrupts | ||
134 | * and wait for the next event to get set. | ||
135 | */ | ||
136 | static void tile_timer_set_mode(enum clock_event_mode mode, | ||
137 | struct clock_event_device *evt) | ||
138 | { | ||
139 | raw_local_irq_mask_now(INT_TILE_TIMER); | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * Set min_delta_ns to 1 microsecond, since it takes about | ||
144 | * that long to fire the interrupt. | ||
145 | */ | ||
146 | static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = { | ||
147 | .name = "tile timer", | ||
148 | .features = CLOCK_EVT_FEAT_ONESHOT, | ||
149 | .min_delta_ns = 1000, | ||
150 | .rating = 100, | ||
151 | .irq = -1, | ||
152 | .set_next_event = tile_timer_set_next_event, | ||
153 | .set_mode = tile_timer_set_mode, | ||
154 | }; | ||
155 | |||
156 | void __cpuinit setup_tile_timer(void) | ||
157 | { | ||
158 | struct clock_event_device *evt = &__get_cpu_var(tile_timer); | ||
159 | |||
160 | /* Fill in fields that are speed-specific. */ | ||
161 | clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC); | ||
162 | evt->max_delta_ns = clockevent_delta2ns(MAX_TICK, evt); | ||
163 | |||
164 | /* Mark as being for this cpu only. */ | ||
165 | evt->cpumask = cpumask_of(smp_processor_id()); | ||
166 | |||
167 | /* Start out with timer not firing. */ | ||
168 | raw_local_irq_mask_now(INT_TILE_TIMER); | ||
169 | |||
170 | /* Register tile timer. */ | ||
171 | clockevents_register_device(evt); | ||
172 | } | ||
173 | |||
174 | /* Called from the interrupt vector. */ | ||
175 | void do_timer_interrupt(struct pt_regs *regs, int fault_num) | ||
176 | { | ||
177 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
178 | struct clock_event_device *evt = &__get_cpu_var(tile_timer); | ||
179 | |||
180 | /* | ||
181 | * Mask the timer interrupt here, since we are a oneshot timer | ||
182 | * and there are now by definition no events pending. | ||
183 | */ | ||
184 | raw_local_irq_mask(INT_TILE_TIMER); | ||
185 | |||
186 | /* Track time spent here in an interrupt context */ | ||
187 | irq_enter(); | ||
188 | |||
189 | /* Track interrupt count. */ | ||
190 | __get_cpu_var(irq_stat).irq_timer_count++; | ||
191 | |||
192 | /* Call the generic timer handler */ | ||
193 | evt->event_handler(evt); | ||
194 | |||
195 | /* | ||
196 | * Track time spent against the current process again and | ||
197 | * process any softirqs if they are waiting. | ||
198 | */ | ||
199 | irq_exit(); | ||
200 | |||
201 | set_irq_regs(old_regs); | ||
202 | } | ||
203 | |||
204 | /* | ||
205 | * Scheduler clock - returns current time in nanosec units. | ||
206 | * Note that with LOCKDEP, this is called during lockdep_init(), and | ||
207 | * we will claim that sched_clock() is zero for a little while, until | ||
208 | * we run setup_clock(), above. | ||
209 | */ | ||
210 | unsigned long long sched_clock(void) | ||
211 | { | ||
212 | return clocksource_cyc2ns(get_cycles(), | ||
213 | cycle_counter_cs.mult, | ||
214 | cycle_counter_cs.shift); | ||
215 | } | ||
216 | |||
217 | int setup_profiling_timer(unsigned int multiplier) | ||
218 | { | ||
219 | return -EINVAL; | ||
220 | } | ||
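On chips where the 64-bit cycle counter is exposed as two 32-bit SPRs, get_cycles() above reads HIGH, then LOW, then HIGH again, and retries whenever the two HIGH reads differ, so a carry between the reads can never yield a torn value. A minimal sketch of the same pattern, with hypothetical read_high()/read_low() accessors standing in for the mfspr instructions (not part of the patch):

    #include <stdint.h>

    extern uint32_t read_high(void);  /* stand-in for mfspr CYCLE_HIGH */
    extern uint32_t read_low(void);   /* stand-in for mfspr CYCLE_LOW */

    uint64_t read_counter64(void)
    {
            uint32_t high = read_high();
            uint32_t low = read_low();
            uint32_t high2 = read_high();

            while (high != high2) {        /* LOW wrapped between the reads */
                    low = read_low();
                    high = high2;
                    high2 = read_high();
            }
            return ((uint64_t)high << 32) | low;
    }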
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c new file mode 100644 index 000000000000..2dffc1044d83 --- /dev/null +++ b/arch/tile/kernel/tlb.c | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/cpumask.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <asm/tlbflush.h> | ||
19 | #include <asm/homecache.h> | ||
20 | #include <hv/hypervisor.h> | ||
21 | |||
22 | /* From tlbflush.h */ | ||
23 | DEFINE_PER_CPU(int, current_asid); | ||
24 | int min_asid, max_asid; | ||
25 | |||
26 | /* | ||
27 | * Note that we flush the L1I (for VM_EXEC pages) as well as the TLB | ||
28 | * so that when we are unmapping an executable page, we also flush it. | ||
29 | * Combined with flushing the L1I at context switch time, this means | ||
30 | * we don't have to do any other icache flushes. | ||
31 | */ | ||
32 | |||
33 | void flush_tlb_mm(struct mm_struct *mm) | ||
34 | { | ||
35 | HV_Remote_ASID asids[NR_CPUS]; | ||
36 | int i = 0, cpu; | ||
37 | for_each_cpu(cpu, &mm->cpu_vm_mask) { | ||
38 | HV_Remote_ASID *asid = &asids[i++]; | ||
39 | asid->y = cpu / smp_topology.width; | ||
40 | asid->x = cpu % smp_topology.width; | ||
41 | asid->asid = per_cpu(current_asid, cpu); | ||
42 | } | ||
43 | flush_remote(0, HV_FLUSH_EVICT_L1I, &mm->cpu_vm_mask, | ||
44 | 0, 0, 0, NULL, asids, i); | ||
45 | } | ||
46 | |||
47 | void flush_tlb_current_task(void) | ||
48 | { | ||
49 | flush_tlb_mm(current->mm); | ||
50 | } | ||
51 | |||
52 | void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm, | ||
53 | unsigned long va) | ||
54 | { | ||
55 | unsigned long size = hv_page_size(vma); | ||
56 | int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; | ||
57 | flush_remote(0, cache, &mm->cpu_vm_mask, | ||
58 | va, size, size, &mm->cpu_vm_mask, NULL, 0); | ||
59 | } | ||
60 | |||
61 | void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va) | ||
62 | { | ||
63 | flush_tlb_page_mm(vma, vma->vm_mm, va); | ||
64 | } | ||
65 | EXPORT_SYMBOL(flush_tlb_page); | ||
66 | |||
67 | void flush_tlb_range(const struct vm_area_struct *vma, | ||
68 | unsigned long start, unsigned long end) | ||
69 | { | ||
70 | unsigned long size = hv_page_size(vma); | ||
71 | struct mm_struct *mm = vma->vm_mm; | ||
72 | int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; | ||
73 | flush_remote(0, cache, &mm->cpu_vm_mask, start, end - start, size, | ||
74 | &mm->cpu_vm_mask, NULL, 0); | ||
75 | } | ||
76 | |||
77 | void flush_tlb_all(void) | ||
78 | { | ||
79 | int i; | ||
80 | for (i = 0; ; ++i) { | ||
81 | HV_VirtAddrRange r = hv_inquire_virtual(i); | ||
82 | if (r.size == 0) | ||
83 | break; | ||
84 | flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask, | ||
85 | r.start, r.size, PAGE_SIZE, cpu_online_mask, | ||
86 | NULL, 0); | ||
87 | flush_remote(0, 0, NULL, | ||
88 | r.start, r.size, HPAGE_SIZE, cpu_online_mask, | ||
89 | NULL, 0); | ||
90 | } | ||
91 | } | ||
92 | |||
93 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
94 | { | ||
95 | flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask, | ||
96 | start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0); | ||
97 | } | ||
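flush_tlb_mm() above turns each linear cpu number into mesh coordinates with y = cpu / width and x = cpu % width before handing the per-cpu ASIDs to the hypervisor. A minimal standalone sketch of that row-major mapping (hypothetical grid width, not part of the patch):

    #include <stdio.h>

    struct coord { int x, y; };

    /* Map a linear cpu number onto a rectangular grid numbered row by row. */
    static struct coord cpu_to_coord(int cpu, int grid_width)
    {
            struct coord c = { .x = cpu % grid_width, .y = cpu / grid_width };
            return c;
    }

    int main(void)
    {
            struct coord c = cpu_to_coord(13, 8);       /* an 8-wide grid */
            printf("cpu 13 -> x=%d y=%d\n", c.x, c.y);  /* x=5 y=1 */
            return 0;
    }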
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c new file mode 100644 index 000000000000..12cb10f38527 --- /dev/null +++ b/arch/tile/kernel/traps.c | |||
@@ -0,0 +1,237 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/kprobes.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/reboot.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | #include <linux/ptrace.h> | ||
22 | #include <asm/opcode-tile.h> | ||
23 | |||
24 | #include <arch/interrupts.h> | ||
25 | #include <arch/spr_def.h> | ||
26 | |||
27 | void __init trap_init(void) | ||
28 | { | ||
29 | /* Nothing needed here since we link code at .intrpt1 */ | ||
30 | } | ||
31 | |||
32 | int unaligned_fixup = 1; | ||
33 | |||
34 | static int __init setup_unaligned_fixup(char *str) | ||
35 | { | ||
36 | /* | ||
37 | * Say "=-1" to completely disable it. If you just do "=0", we | ||
38 | * will still parse the instruction, then fire a SIGBUS with | ||
39 | * the correct address from inside the single_step code. | ||
40 | */ | ||
41 | long val; | ||
42 | if (strict_strtol(str, 0, &val) != 0) | ||
43 | return 0; | ||
44 | unaligned_fixup = val; | ||
45 | printk("Fixups for unaligned data accesses are %s\n", | ||
46 | unaligned_fixup >= 0 ? | ||
47 | (unaligned_fixup ? "enabled" : "disabled") : | ||
48 | "completely disabled"); | ||
49 | return 1; | ||
50 | } | ||
51 | __setup("unaligned_fixup=", setup_unaligned_fixup); | ||
52 | |||
53 | #if CHIP_HAS_TILE_DMA() | ||
54 | |||
55 | static int dma_disabled; | ||
56 | |||
57 | static int __init nodma(char *str) | ||
58 | { | ||
59 | printk("User-space DMA is disabled\n"); | ||
60 | dma_disabled = 1; | ||
61 | return 1; | ||
62 | } | ||
63 | __setup("nodma", nodma); | ||
64 | |||
65 | /* How to decode SPR_GPV_REASON */ | ||
66 | #define IRET_ERROR (1U << 31) | ||
67 | #define MT_ERROR (1U << 30) | ||
68 | #define MF_ERROR (1U << 29) | ||
69 | #define SPR_INDEX ((1U << 15) - 1) | ||
70 | #define SPR_MPL_SHIFT 9 /* starting bit position for MPL encoded in SPR */ | ||
71 | |||
72 | /* | ||
73 | * See if this GPV is just to notify the kernel of SPR use and we can | ||
74 | * retry the user instruction after adjusting some MPLs suitably. | ||
75 | */ | ||
76 | static int retry_gpv(unsigned int gpv_reason) | ||
77 | { | ||
78 | int mpl; | ||
79 | |||
80 | if (gpv_reason & IRET_ERROR) | ||
81 | return 0; | ||
82 | |||
83 | BUG_ON((gpv_reason & (MT_ERROR|MF_ERROR)) == 0); | ||
84 | mpl = (gpv_reason & SPR_INDEX) >> SPR_MPL_SHIFT; | ||
85 | if (mpl == INT_DMA_NOTIFY && !dma_disabled) { | ||
86 | /* User is turning on DMA. Allow it and retry. */ | ||
87 | printk(KERN_DEBUG "Process %d/%s is now enabled for DMA\n", | ||
88 | current->pid, current->comm); | ||
89 | BUG_ON(current->thread.tile_dma_state.enabled); | ||
90 | current->thread.tile_dma_state.enabled = 1; | ||
91 | grant_dma_mpls(); | ||
92 | return 1; | ||
93 | } | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | #endif /* CHIP_HAS_TILE_DMA() */ | ||
99 | |||
100 | /* Defined inside do_trap(), below. */ | ||
101 | #ifdef __tilegx__ | ||
102 | extern tilegx_bundle_bits bpt_code; | ||
103 | #else | ||
104 | extern tile_bundle_bits bpt_code; | ||
105 | #endif | ||
106 | |||
107 | void __kprobes do_trap(struct pt_regs *regs, int fault_num, | ||
108 | unsigned long reason) | ||
109 | { | ||
110 | siginfo_t info = { 0 }; | ||
111 | int signo, code; | ||
112 | unsigned long address; | ||
113 | __typeof__(bpt_code) instr; | ||
114 | |||
115 | /* Re-enable interrupts. */ | ||
116 | local_irq_enable(); | ||
117 | |||
118 | /* | ||
119 | * If it hits in kernel mode and we can't fix it up, just exit the | ||
120 | * current process and hope for the best. | ||
121 | */ | ||
122 | if (!user_mode(regs)) { | ||
123 | if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */ | ||
124 | return; | ||
125 | printk(KERN_ALERT "Kernel took bad trap %d at PC %#lx\n", | ||
126 | fault_num, regs->pc); | ||
127 | if (fault_num == INT_GPV) | ||
128 | printk(KERN_ALERT "GPV_REASON is %#lx\n", reason); | ||
129 | show_regs(regs); | ||
130 | do_exit(SIGKILL); /* FIXME: implement i386 die() */ | ||
131 | return; | ||
132 | } | ||
133 | |||
134 | switch (fault_num) { | ||
135 | case INT_ILL: | ||
136 | asm(".pushsection .rodata.bpt_code,\"a\";" | ||
137 | ".align 8;" | ||
138 | "bpt_code: bpt;" | ||
139 | ".size bpt_code,.-bpt_code;" | ||
140 | ".popsection"); | ||
141 | |||
142 | if (copy_from_user(&instr, (void *)regs->pc, sizeof(instr))) { | ||
143 | printk(KERN_ERR "Unreadable instruction for INT_ILL:" | ||
144 | " %#lx\n", regs->pc); | ||
145 | do_exit(SIGKILL); | ||
146 | return; | ||
147 | } | ||
148 | if (instr == bpt_code) { | ||
149 | signo = SIGTRAP; | ||
150 | code = TRAP_BRKPT; | ||
151 | } else { | ||
152 | signo = SIGILL; | ||
153 | code = ILL_ILLOPC; | ||
154 | } | ||
155 | address = regs->pc; | ||
156 | break; | ||
157 | case INT_GPV: | ||
158 | #if CHIP_HAS_TILE_DMA() | ||
159 | if (retry_gpv(reason)) | ||
160 | return; | ||
161 | #endif | ||
162 | /*FALLTHROUGH*/ | ||
163 | case INT_UDN_ACCESS: | ||
164 | case INT_IDN_ACCESS: | ||
165 | #if CHIP_HAS_SN() | ||
166 | case INT_SN_ACCESS: | ||
167 | #endif | ||
168 | signo = SIGILL; | ||
169 | code = ILL_PRVREG; | ||
170 | address = regs->pc; | ||
171 | break; | ||
172 | case INT_SWINT_3: | ||
173 | case INT_SWINT_2: | ||
174 | case INT_SWINT_0: | ||
175 | signo = SIGILL; | ||
176 | code = ILL_ILLTRP; | ||
177 | address = regs->pc; | ||
178 | break; | ||
179 | case INT_UNALIGN_DATA: | ||
180 | #ifndef __tilegx__ /* FIXME: GX: no single-step yet */ | ||
181 | if (unaligned_fixup >= 0) { | ||
182 | struct single_step_state *state = | ||
183 | current_thread_info()->step_state; | ||
184 | if (!state || (void *)(regs->pc) != state->buffer) { | ||
185 | single_step_once(regs); | ||
186 | return; | ||
187 | } | ||
188 | } | ||
189 | #endif | ||
190 | signo = SIGBUS; | ||
191 | code = BUS_ADRALN; | ||
192 | address = 0; | ||
193 | break; | ||
194 | case INT_DOUBLE_FAULT: | ||
195 | /* | ||
196 | * For double fault, "reason" is actually passed as | ||
197 | * SYSTEM_SAVE_1_2, the hypervisor's double-fault info, so | ||
198 | * we can provide the original fault number rather than | ||
199 | * the uninteresting "INT_DOUBLE_FAULT" so the user can | ||
200 | * learn what actually struck while PL0 ICS was set. | ||
201 | */ | ||
202 | fault_num = reason; | ||
203 | signo = SIGILL; | ||
204 | code = ILL_DBLFLT; | ||
205 | address = regs->pc; | ||
206 | break; | ||
207 | #ifdef __tilegx__ | ||
208 | case INT_ILL_TRANS: | ||
209 | signo = SIGSEGV; | ||
210 | code = SEGV_MAPERR; | ||
211 | if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK) | ||
212 | address = regs->pc; | ||
213 | else | ||
214 | address = 0; /* FIXME: GX: single-step for address */ | ||
215 | break; | ||
216 | #endif | ||
217 | default: | ||
218 | panic("Unexpected do_trap interrupt number %d", fault_num); | ||
219 | return; | ||
220 | } | ||
221 | |||
222 | info.si_signo = signo; | ||
223 | info.si_code = code; | ||
224 | info.si_addr = (void *)address; | ||
225 | if (signo == SIGILL) | ||
226 | info.si_trapno = fault_num; | ||
227 | force_sig_info(signo, &info, current); | ||
228 | } | ||
229 | |||
230 | extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52); | ||
231 | |||
232 | void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52) | ||
233 | { | ||
234 | _dump_stack(dummy, pc, lr, sp, r52); | ||
235 | printk("Double fault: exiting\n"); | ||
236 | machine_halt(); | ||
237 | } | ||
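retry_gpv() above treats the GPV reason word as a small bitfield: the top bits say whether the faulting access was an iret, mtspr, or mfspr, the low 15 bits carry the SPR index, and the MPL number sits at bit 9 and up of that index. A minimal sketch that decodes a reason word with the same layout (example value only, not part of the patch):

    #include <stdio.h>

    #define IRET_ERROR    (1U << 31)
    #define MT_ERROR      (1U << 30)
    #define MF_ERROR      (1U << 29)
    #define SPR_INDEX     ((1U << 15) - 1)
    #define SPR_MPL_SHIFT 9

    static void decode_gpv(unsigned int reason)
    {
            int mpl = (reason & SPR_INDEX) >> SPR_MPL_SHIFT;

            printf("iret=%d mtspr=%d mfspr=%d mpl=%d\n",
                   !!(reason & IRET_ERROR), !!(reason & MT_ERROR),
                   !!(reason & MF_ERROR), mpl);
    }

    int main(void)
    {
            /* e.g. a protection violation on an mtspr to an MPL-28 SPR */
            decode_gpv(MT_ERROR | (28U << SPR_MPL_SHIFT));
            return 0;
    }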
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S new file mode 100644 index 000000000000..77388c1415bd --- /dev/null +++ b/arch/tile/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,98 @@ | |||
1 | #include <asm-generic/vmlinux.lds.h> | ||
2 | #include <asm/page.h> | ||
3 | #include <asm/cache.h> | ||
4 | #include <asm/thread_info.h> | ||
5 | #include <hv/hypervisor.h> | ||
6 | |||
7 | /* Text loads starting from the supervisor interrupt vector address. */ | ||
8 | #define TEXT_OFFSET MEM_SV_INTRPT | ||
9 | |||
10 | OUTPUT_ARCH(tile) | ||
11 | ENTRY(_start) | ||
12 | jiffies = jiffies_64; | ||
13 | |||
14 | PHDRS | ||
15 | { | ||
16 | intrpt1 PT_LOAD ; | ||
17 | text PT_LOAD ; | ||
18 | data PT_LOAD ; | ||
19 | } | ||
20 | SECTIONS | ||
21 | { | ||
22 | /* Text is loaded with a different VA than data; start with text. */ | ||
23 | #undef LOAD_OFFSET | ||
24 | #define LOAD_OFFSET TEXT_OFFSET | ||
25 | |||
26 | /* Interrupt vectors */ | ||
27 | .intrpt1 (LOAD_OFFSET) : AT ( 0 ) /* put at the start of physical memory */ | ||
28 | { | ||
29 | _text = .; | ||
30 | _stext = .; | ||
31 | *(.intrpt1) | ||
32 | } :intrpt1 =0 | ||
33 | |||
34 | /* Hypervisor call vectors */ | ||
35 | #include "hvglue.lds" | ||
36 | |||
37 | /* Now the real code */ | ||
38 | . = ALIGN(0x20000); | ||
39 | HEAD_TEXT_SECTION :text =0 | ||
40 | .text : AT (ADDR(.text) - LOAD_OFFSET) { | ||
41 | SCHED_TEXT | ||
42 | LOCK_TEXT | ||
43 | __fix_text_end = .; /* tile-cpack won't rearrange before this */ | ||
44 | TEXT_TEXT | ||
45 | *(.text.*) | ||
46 | *(.coldtext*) | ||
47 | *(.fixup) | ||
48 | *(.gnu.warning) | ||
49 | } | ||
50 | _etext = .; | ||
51 | |||
52 | /* "Init" is divided into two areas with very different virtual addresses. */ | ||
53 | INIT_TEXT_SECTION(PAGE_SIZE) | ||
54 | |||
55 | /* Now we skip back to PAGE_OFFSET for the data. */ | ||
56 | . = (. - TEXT_OFFSET + PAGE_OFFSET); | ||
57 | #undef LOAD_OFFSET | ||
58 | #define LOAD_OFFSET PAGE_OFFSET | ||
59 | |||
60 | . = ALIGN(PAGE_SIZE); | ||
61 | VMLINUX_SYMBOL(_sinitdata) = .; | ||
62 | .init.page : AT (ADDR(.init.page) - LOAD_OFFSET) { | ||
63 | *(.init.page) | ||
64 | } :data =0 | ||
65 | INIT_DATA_SECTION(16) | ||
66 | PERCPU(PAGE_SIZE) | ||
67 | . = ALIGN(PAGE_SIZE); | ||
68 | VMLINUX_SYMBOL(_einitdata) = .; | ||
69 | |||
70 | _sdata = .; /* Start of data section */ | ||
71 | |||
72 | RO_DATA_SECTION(PAGE_SIZE) | ||
73 | |||
74 | /* initially writeable, then read-only */ | ||
75 | . = ALIGN(PAGE_SIZE); | ||
76 | __w1data_begin = .; | ||
77 | .w1data : AT(ADDR(.w1data) - LOAD_OFFSET) { | ||
78 | VMLINUX_SYMBOL(__w1data_begin) = .; | ||
79 | *(.w1data) | ||
80 | VMLINUX_SYMBOL(__w1data_end) = .; | ||
81 | } | ||
82 | |||
83 | RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) | ||
84 | |||
85 | _edata = .; | ||
86 | |||
87 | EXCEPTION_TABLE(L2_CACHE_BYTES) | ||
88 | NOTES | ||
89 | |||
90 | |||
91 | BSS_SECTION(8, PAGE_SIZE, 1) | ||
92 | _end = . ; | ||
93 | |||
94 | STABS_DEBUG | ||
95 | DWARF_DEBUG | ||
96 | |||
97 | DISCARDS | ||
98 | } | ||
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile new file mode 100644 index 000000000000..ea9c209d33fb --- /dev/null +++ b/arch/tile/lib/Makefile | |||
@@ -0,0 +1,16 @@ | |||
1 | # | ||
2 | # Makefile for TILE-specific library files. | ||
3 | # | ||
4 | |||
5 | lib-y = checksum.o cpumask.o delay.o __invalidate_icache.o \ | ||
6 | mb_incoherent.o uaccess.o \ | ||
7 | memcpy_$(BITS).o memchr_$(BITS).o memmove_$(BITS).o memset_$(BITS).o \ | ||
8 | strchr_$(BITS).o strlen_$(BITS).o | ||
9 | |||
10 | ifneq ($(CONFIG_TILEGX),y) | ||
11 | lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o | ||
12 | endif | ||
13 | |||
14 | lib-$(CONFIG_SMP) += spinlock_$(BITS).o usercopy_$(BITS).o | ||
15 | |||
16 | obj-$(CONFIG_MODULES) += exports.o | ||
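(See the C-level sketch after the __invalidate_icache.S listing below for the cache-walk structure this library implements in assembly.)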
diff --git a/arch/tile/lib/__invalidate_icache.S b/arch/tile/lib/__invalidate_icache.S new file mode 100644 index 000000000000..92e705059127 --- /dev/null +++ b/arch/tile/lib/__invalidate_icache.S | |||
@@ -0,0 +1,106 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * A routine for synchronizing the instruction and data caches. | ||
14 | * Useful for self-modifying code. | ||
15 | * | ||
16 | * r0 holds the buffer address | ||
17 | * r1 holds the size in bytes | ||
18 | */ | ||
19 | |||
20 | #include <arch/chip.h> | ||
21 | #include <feedback.h> | ||
22 | |||
23 | #if defined(__NEWLIB__) || defined(__BME__) | ||
24 | #include <sys/page.h> | ||
25 | #else | ||
26 | #include <asm/page.h> | ||
27 | #endif | ||
28 | |||
29 | #ifdef __tilegx__ | ||
30 | /* Share code among Tile family chips but adjust opcodes appropriately. */ | ||
31 | #define slt cmpltu | ||
32 | #define bbst blbst | ||
33 | #define bnezt bnzt | ||
34 | #endif | ||
35 | |||
36 | #if defined(__tilegx__) && __SIZEOF_POINTER__ == 4 | ||
37 | /* Force 32-bit ops so pointers wrap around appropriately. */ | ||
38 | #define ADD_PTR addx | ||
39 | #define ADDI_PTR addxi | ||
40 | #else | ||
41 | #define ADD_PTR add | ||
42 | #define ADDI_PTR addi | ||
43 | #endif | ||
44 | |||
45 | .section .text.__invalidate_icache, "ax" | ||
46 | .global __invalidate_icache | ||
47 | .type __invalidate_icache,@function | ||
48 | .hidden __invalidate_icache | ||
49 | .align 8 | ||
50 | __invalidate_icache: | ||
51 | FEEDBACK_ENTER(__invalidate_icache) | ||
52 | { | ||
53 | ADD_PTR r1, r0, r1 /* end of buffer */ | ||
54 | blez r1, .Lexit /* skip out if size <= 0 */ | ||
55 | } | ||
56 | { | ||
57 | ADDI_PTR r1, r1, -1 /* point to last byte to flush */ | ||
58 | andi r0, r0, -CHIP_L1I_LINE_SIZE() /* align to cache-line size */ | ||
59 | } | ||
60 | { | ||
61 | andi r1, r1, -CHIP_L1I_LINE_SIZE() /* last cache line to flush */ | ||
62 | mf | ||
63 | } | ||
64 | #if CHIP_L1I_CACHE_SIZE() > PAGE_SIZE | ||
65 | { | ||
66 | moveli r4, CHIP_L1I_CACHE_SIZE() / PAGE_SIZE /* loop counter */ | ||
67 | move r2, r0 /* remember starting address */ | ||
68 | } | ||
69 | #endif | ||
70 | drain | ||
71 | { | ||
72 | slt r3, r0, r1 /* set up loop invariant */ | ||
73 | #if CHIP_L1I_CACHE_SIZE() > PAGE_SIZE | ||
74 | moveli r6, PAGE_SIZE | ||
75 | #endif | ||
76 | } | ||
77 | .Lentry: | ||
78 | { | ||
79 | icoh r0 | ||
80 | ADDI_PTR r0, r0, CHIP_L1I_LINE_SIZE() /* advance buffer */ | ||
81 | } | ||
82 | { | ||
83 | slt r3, r0, r1 /* check if buffer < buffer + size */ | ||
84 | bbst r3, .Lentry /* loop if buffer < buffer + size */ | ||
85 | } | ||
86 | #if CHIP_L1I_CACHE_SIZE() > PAGE_SIZE | ||
87 | { | ||
88 | ADD_PTR r2, r2, r6 | ||
89 | ADD_PTR r1, r1, r6 | ||
90 | } | ||
91 | { | ||
92 | move r0, r2 | ||
93 | addi r4, r4, -1 | ||
94 | } | ||
95 | { | ||
96 | slt r3, r0, r1 /* set up loop invariant */ | ||
97 | bnezt r4, .Lentry | ||
98 | } | ||
99 | #endif | ||
100 | drain | ||
101 | .Lexit: | ||
102 | jrp lr | ||
103 | |||
104 | .Lend___invalidate_icache: | ||
105 | .size __invalidate_icache, \ | ||
106 | .Lend___invalidate_icache - __invalidate_icache | ||
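The assembly above rounds the buffer start down to an L1I line boundary, computes the line holding the last byte, and then walks the range one cache line at a time issuing "icoh". A C-level sketch of that basic walk, with a hypothetical invalidate_one_line() standing in for the instruction and an assumed 64-byte line size (not part of the patch; the assembly additionally re-walks the range when the buffer exceeds a page and the I-cache is larger than a page):

    #include <stdint.h>
    #include <stddef.h>

    #define L1I_LINE_SIZE 64UL                /* assumed line size for the sketch */

    extern void invalidate_one_line(uintptr_t addr);  /* stand-in for "icoh" */

    void invalidate_icache_range(uintptr_t start, size_t size)
    {
            uintptr_t addr, last;

            if (size == 0)
                    return;

            /* Last line to touch is the one holding the final byte. */
            last = (start + size - 1) & ~(L1I_LINE_SIZE - 1);
            for (addr = start & ~(L1I_LINE_SIZE - 1);
                 addr <= last; addr += L1I_LINE_SIZE)
                    invalidate_one_line(addr);
    }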
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c new file mode 100644 index 000000000000..be1e8acd105d --- /dev/null +++ b/arch/tile/lib/atomic_32.c | |||
@@ -0,0 +1,347 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/cache.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <asm/atomic.h> | ||
21 | #include <arch/chip.h> | ||
22 | |||
23 | /* The routines in atomic_asm.S are private, so we only declare them here. */ | ||
24 | extern struct __get_user __atomic_cmpxchg(volatile int *p, | ||
25 | int *lock, int o, int n); | ||
26 | extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n); | ||
27 | extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n); | ||
28 | extern struct __get_user __atomic_xchg_add_unless(volatile int *p, | ||
29 | int *lock, int o, int n); | ||
30 | extern struct __get_user __atomic_or(volatile int *p, int *lock, int n); | ||
31 | extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n); | ||
32 | extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n); | ||
33 | |||
34 | extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n); | ||
35 | extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n); | ||
36 | extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n); | ||
37 | extern u64 __atomic64_xchg_add_unless(volatile u64 *p, | ||
38 | int *lock, u64 o, u64 n); | ||
39 | |||
40 | |||
41 | /* See <asm/atomic.h> */ | ||
42 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
43 | |||
44 | /* | ||
45 | * A block of memory containing locks for atomic ops. Each instance of this | ||
46 | * struct will be homed on a different CPU. | ||
47 | */ | ||
48 | struct atomic_locks_on_cpu { | ||
49 | int lock[ATOMIC_HASH_L2_SIZE]; | ||
50 | } __attribute__((aligned(ATOMIC_HASH_L2_SIZE * 4))); | ||
51 | |||
52 | static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool); | ||
53 | |||
54 | /* The locks we'll use until __init_atomic_per_cpu is called. */ | ||
55 | static struct atomic_locks_on_cpu __initdata initial_atomic_locks; | ||
56 | |||
57 | /* Hash into this vector to get a pointer to lock for the given atomic. */ | ||
58 | struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE] | ||
59 | __write_once = { | ||
60 | [0 ... ATOMIC_HASH_L1_SIZE-1] (&initial_atomic_locks) | ||
61 | }; | ||
62 | |||
63 | #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
64 | |||
65 | /* This page is remapped on startup to be hash-for-home. */ | ||
66 | int atomic_locks[PAGE_SIZE / sizeof(int) /* Only ATOMIC_HASH_SIZE is used */] | ||
67 | __attribute__((aligned(PAGE_SIZE), section(".bss.page_aligned"))); | ||
68 | |||
69 | #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
70 | |||
71 | static inline int *__atomic_hashed_lock(volatile void *v) | ||
72 | { | ||
73 | /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec.S */ | ||
74 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
75 | unsigned long i = | ||
76 | (unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long)); | ||
77 | unsigned long n = __insn_crc32_32(0, i); | ||
78 | |||
79 | /* Grab high bits for L1 index. */ | ||
80 | unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT); | ||
81 | /* Grab low bits for L2 index. */ | ||
82 | unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1); | ||
83 | |||
84 | return &atomic_lock_ptr[l1_index]->lock[l2_index]; | ||
85 | #else | ||
86 | /* | ||
87 | * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index. | ||
88 | * Using mm works here because atomic_locks is page aligned. | ||
89 | */ | ||
90 | unsigned long ptr = __insn_mm((unsigned long)v >> 1, | ||
91 | (unsigned long)atomic_locks, | ||
92 | 2, (ATOMIC_HASH_SHIFT + 2) - 1); | ||
93 | return (int *)ptr; | ||
94 | #endif | ||
95 | } | ||
96 | |||
97 | #ifdef CONFIG_SMP | ||
98 | /* Return whether the passed pointer is a valid atomic lock pointer. */ | ||
99 | static int is_atomic_lock(int *p) | ||
100 | { | ||
101 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
102 | int i; | ||
103 | for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) { | ||
104 | |||
105 | if (p >= &atomic_lock_ptr[i]->lock[0] && | ||
106 | p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) { | ||
107 | return 1; | ||
108 | } | ||
109 | } | ||
110 | return 0; | ||
111 | #else | ||
112 | return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE]; | ||
113 | #endif | ||
114 | } | ||
115 | |||
116 | void __atomic_fault_unlock(int *irqlock_word) | ||
117 | { | ||
118 | BUG_ON(!is_atomic_lock(irqlock_word)); | ||
119 | BUG_ON(*irqlock_word != 1); | ||
120 | *irqlock_word = 0; | ||
121 | } | ||
122 | |||
123 | #endif /* CONFIG_SMP */ | ||
124 | |||
125 | static inline int *__atomic_setup(volatile void *v) | ||
126 | { | ||
127 | /* Issue a load to the target to bring it into cache. */ | ||
128 | *(volatile int *)v; | ||
129 | return __atomic_hashed_lock(v); | ||
130 | } | ||
131 | |||
132 | int _atomic_xchg(atomic_t *v, int n) | ||
133 | { | ||
134 | return __atomic_xchg(&v->counter, __atomic_setup(v), n).val; | ||
135 | } | ||
136 | EXPORT_SYMBOL(_atomic_xchg); | ||
137 | |||
138 | int _atomic_xchg_add(atomic_t *v, int i) | ||
139 | { | ||
140 | return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val; | ||
141 | } | ||
142 | EXPORT_SYMBOL(_atomic_xchg_add); | ||
143 | |||
144 | int _atomic_xchg_add_unless(atomic_t *v, int a, int u) | ||
145 | { | ||
146 | /* | ||
147 | * Note: argument order is switched here since it is easier | ||
148 | * to use the first argument consistently as the "old value" | ||
149 | * in the assembly, as is done for _atomic_cmpxchg(). | ||
150 | */ | ||
151 | return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a) | ||
152 | .val; | ||
153 | } | ||
154 | EXPORT_SYMBOL(_atomic_xchg_add_unless); | ||
155 | |||
156 | int _atomic_cmpxchg(atomic_t *v, int o, int n) | ||
157 | { | ||
158 | return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val; | ||
159 | } | ||
160 | EXPORT_SYMBOL(_atomic_cmpxchg); | ||
161 | |||
162 | unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask) | ||
163 | { | ||
164 | return __atomic_or((int *)p, __atomic_setup(p), mask).val; | ||
165 | } | ||
166 | EXPORT_SYMBOL(_atomic_or); | ||
167 | |||
168 | unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask) | ||
169 | { | ||
170 | return __atomic_andn((int *)p, __atomic_setup(p), mask).val; | ||
171 | } | ||
172 | EXPORT_SYMBOL(_atomic_andn); | ||
173 | |||
174 | unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask) | ||
175 | { | ||
176 | return __atomic_xor((int *)p, __atomic_setup(p), mask).val; | ||
177 | } | ||
178 | EXPORT_SYMBOL(_atomic_xor); | ||
179 | |||
180 | |||
181 | u64 _atomic64_xchg(atomic64_t *v, u64 n) | ||
182 | { | ||
183 | return __atomic64_xchg(&v->counter, __atomic_setup(v), n); | ||
184 | } | ||
185 | EXPORT_SYMBOL(_atomic64_xchg); | ||
186 | |||
187 | u64 _atomic64_xchg_add(atomic64_t *v, u64 i) | ||
188 | { | ||
189 | return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i); | ||
190 | } | ||
191 | EXPORT_SYMBOL(_atomic64_xchg_add); | ||
192 | |||
193 | u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u) | ||
194 | { | ||
195 | /* | ||
196 | * Note: argument order is switched here since it is easier | ||
197 | * to use the first argument consistently as the "old value" | ||
198 | * in the assembly, as is done for _atomic_cmpxchg(). | ||
199 | */ | ||
200 | return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v), | ||
201 | u, a); | ||
202 | } | ||
203 | EXPORT_SYMBOL(_atomic64_xchg_add_unless); | ||
204 | |||
205 | u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) | ||
206 | { | ||
207 | return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n); | ||
208 | } | ||
209 | EXPORT_SYMBOL(_atomic64_cmpxchg); | ||
210 | |||
211 | |||
212 | static inline int *__futex_setup(__user int *v) | ||
213 | { | ||
214 | /* | ||
215 | * Issue a prefetch to the counter to bring it into cache. | ||
216 | * As for __atomic_setup, but we can't do a read into the L1 | ||
217 | * since it might fault; instead we do a prefetch into the L2. | ||
218 | */ | ||
219 | __insn_prefetch(v); | ||
220 | return __atomic_hashed_lock(v); | ||
221 | } | ||
222 | |||
223 | struct __get_user futex_set(int *v, int i) | ||
224 | { | ||
225 | return __atomic_xchg(v, __futex_setup(v), i); | ||
226 | } | ||
227 | |||
228 | struct __get_user futex_add(int *v, int n) | ||
229 | { | ||
230 | return __atomic_xchg_add(v, __futex_setup(v), n); | ||
231 | } | ||
232 | |||
233 | struct __get_user futex_or(int *v, int n) | ||
234 | { | ||
235 | return __atomic_or(v, __futex_setup(v), n); | ||
236 | } | ||
237 | |||
238 | struct __get_user futex_andn(int *v, int n) | ||
239 | { | ||
240 | return __atomic_andn(v, __futex_setup(v), n); | ||
241 | } | ||
242 | |||
243 | struct __get_user futex_xor(int *v, int n) | ||
244 | { | ||
245 | return __atomic_xor(v, __futex_setup(v), n); | ||
246 | } | ||
247 | |||
248 | struct __get_user futex_cmpxchg(int *v, int o, int n) | ||
249 | { | ||
250 | return __atomic_cmpxchg(v, __futex_setup(v), o, n); | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * If any of the atomic or futex routines hit a bad address (not in | ||
255 | * the page tables at kernel PL) this routine is called. The futex | ||
256 | * routines are never used on kernel addresses, and the normal atomics | ||
257 | * and bitops are never used on user addresses. So a fault on a kernel | ||
258 | * address must be fatal, but a fault on a user address is a futex fault and we | ||
259 | * need to return -EFAULT. Note that the context this routine is | ||
260 | * invoked in is the context of the "_atomic_xxx()" routines called | ||
261 | * by the functions in this file. | ||
262 | */ | ||
263 | struct __get_user __atomic_bad_address(int *addr) | ||
264 | { | ||
265 | if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int)))) | ||
266 | panic("Bad address used for kernel atomic op: %p\n", addr); | ||
267 | return (struct __get_user) { .err = -EFAULT }; | ||
268 | } | ||
269 | |||
270 | |||
271 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
272 | static int __init noatomichash(char *str) | ||
273 | { | ||
274 | printk("noatomichash is deprecated.\n"); | ||
275 | return 1; | ||
276 | } | ||
277 | __setup("noatomichash", noatomichash); | ||
278 | #endif | ||
279 | |||
280 | void __init __init_atomic_per_cpu(void) | ||
281 | { | ||
282 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
283 | |||
284 | unsigned int i; | ||
285 | int actual_cpu; | ||
286 | |||
287 | /* | ||
288 | * Before this is called from setup, we just have one lock for | ||
289 | * all atomic objects/operations. Here we replace the | ||
290 | * elements of atomic_lock_ptr so that they point at per_cpu | ||
291 | * integers. This seemingly over-complex approach stems from | ||
292 | * the fact that DEFINE_PER_CPU defines an entry for each cpu | ||
293 | * in the grid, not each cpu from 0..ATOMIC_HASH_SIZE-1. But | ||
294 | * for efficient hashing of atomics to their locks we want a | ||
295 | * compile time constant power of 2 for the size of this | ||
296 | * table, so we use ATOMIC_HASH_SIZE. | ||
297 | * | ||
298 | * Here we populate atomic_lock_ptr from the per cpu | ||
299 | * atomic_lock_pool, interspersing by actual cpu so that | ||
300 | * subsequent elements are homed on consecutive cpus. | ||
301 | */ | ||
302 | |||
303 | actual_cpu = cpumask_first(cpu_possible_mask); | ||
304 | |||
305 | for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) { | ||
306 | /* | ||
307 | * Preincrement to slightly bias against using cpu 0, | ||
308 | * which has plenty of stuff homed on it already. | ||
309 | */ | ||
310 | actual_cpu = cpumask_next(actual_cpu, cpu_possible_mask); | ||
311 | if (actual_cpu >= nr_cpu_ids) | ||
312 | actual_cpu = cpumask_first(cpu_possible_mask); | ||
313 | |||
314 | atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu); | ||
315 | } | ||
316 | |||
317 | #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
318 | |||
319 | /* Validate power-of-two and "bigger than cpus" assumption */ | ||
320 | BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1)); | ||
321 | BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids); | ||
322 | |||
323 | /* | ||
324 | * On TILEPro we prefer to use a single hash-for-home | ||
325 | * page, since this means atomic operations are less | ||
326 | * likely to encounter a TLB fault and thus should | ||
327 | * in general perform faster. You may wish to disable | ||
328 | * this in situations where few hash-for-home tiles | ||
329 | * are configured. | ||
330 | */ | ||
331 | BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0); | ||
332 | |||
333 | /* The locks must all fit on one page. */ | ||
334 | BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE); | ||
335 | |||
336 | /* | ||
337 | * We use the page offset of the atomic value's address as | ||
338 | * an index into atomic_locks, excluding the low 3 bits. | ||
339 | * That should not produce more indices than ATOMIC_HASH_SIZE. | ||
340 | */ | ||
341 | BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE); | ||
342 | |||
343 | #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
344 | |||
345 | /* The futex code makes this assumption, so we validate it here. */ | ||
346 | BUG_ON(sizeof(atomic_t) != sizeof(int)); | ||
347 | } | ||
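For illustration, the lock hashing described in the comments above can be restated in C. This is only a sketch inferred from the page-offset scheme and the BUG_ON checks in __init_atomic_per_cpu(), not the actual __atomic_hashed_lock() implementation (defined elsewhere in this patch); the helper name example_hashed_lock() is hypothetical.

	/* Sketch only: map an atomic's address to a lock in the single
	 * hash-for-home page of locks, indexing by the page offset with
	 * the low 3 bits dropped.  (PAGE_SIZE >> 3) <= ATOMIC_HASH_SIZE,
	 * per the BUG_ON above, so the index always stays in bounds.
	 */
	static inline int *example_hashed_lock(volatile void *v)
	{
		unsigned long offset = (unsigned long)v & (PAGE_SIZE - 1);
		return &atomic_locks[offset >> 3];
	}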
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S new file mode 100644 index 000000000000..c0d058578192 --- /dev/null +++ b/arch/tile/lib/atomic_asm_32.S | |||
@@ -0,0 +1,197 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Support routines for atomic operations. Each function takes: | ||
15 | * | ||
16 | * r0: address to manipulate | ||
17 | * r1: pointer to atomic lock guarding this operation (for FUTEX_LOCK_REG) | ||
18 | * r2: new value to write, or for cmpxchg/add_unless, value to compare against | ||
19 | * r3: (cmpxchg/xchg_add_unless) new value to write or add; | ||
20 | * (atomic64 ops) high word of value to write | ||
21 | * r4/r5: (cmpxchg64/add_unless64) new value to write or add | ||
22 | * | ||
23 | * The 32-bit routines return a "struct __get_user" so that the futex code | ||
24 | * has an opportunity to return -EFAULT to the user if needed. | ||
25 | * The 64-bit routines just return a "long long" with the value, | ||
26 | * since they are only used from kernel space and don't expect to fault. | ||
27 | * Support for 16-bit ops is included in the framework but we don't provide | ||
28 | * any (x86_64 has an atomic_inc_short(), so we might want to add one some day). | ||
29 | * | ||
30 | * Note that the caller is advised to issue a suitable L1 or L2 | ||
31 | * prefetch on the address being manipulated to avoid extra stalls. | ||
32 | * In addition, the hot path is on two icache lines, and we start with | ||
33 | * a jump to the second line to make sure they are both in cache so | ||
34 | * that we never stall waiting on icache fill while holding the lock. | ||
35 | * (This doesn't work out with most 64-bit ops, since they consume | ||
36 | * too many bundles, so they may take an extra i-cache stall.) | ||
37 | * | ||
38 | * These routines set the INTERRUPT_CRITICAL_SECTION bit, just | ||
39 | * like sys_cmpxchg(), so that NMIs like PERF_COUNT will not interrupt | ||
40 | * the code, just page faults. | ||
41 | * | ||
42 | * If the load or store faults in a way that can be directly fixed in | ||
43 | * the do_page_fault_ics() handler (e.g. a vmalloc reference) we fix it | ||
44 | * directly, return to the instruction that faulted, and retry it. | ||
45 | * | ||
46 | * If the load or store faults in a way that potentially requires us | ||
47 | * to release the atomic lock and then retry (e.g. a migrating PTE), | ||
48 | * we reset the PC in do_page_fault_ics() to the "tns" instruction so | ||
49 | * that on return we will reacquire the lock and restart the op. We | ||
50 | * are somewhat overloading the exception_table_entry notion by doing | ||
51 | * this, since those entries are not normally used for migrating PTEs. | ||
52 | * | ||
53 | * If the main page fault handler discovers a bad address, it will see | ||
54 | * the PC pointing to the "tns" instruction (due to the earlier | ||
55 | * exception_table_entry processing in do_page_fault_ics), and | ||
56 | * re-reset the PC to the fault handler, atomic_bad_address(), which | ||
57 | * effectively takes over from the atomic op and can either return a | ||
58 | * bad "struct __get_user" (for user addresses) or can just panic (for | ||
59 | * bad kernel addresses). | ||
60 | * | ||
61 | * Note that if the value we would store is the same as what we | ||
62 | * loaded, we bypass the load. Other platforms with true atomics can | ||
63 | * make the guarantee that a non-atomic __clear_bit(), for example, | ||
64 | * can safely race with an atomic test_and_set_bit(); this example is | ||
65 | * from bit_spinlock.h in slub_lock() / slub_unlock(). We can't do | ||
66 | * that on Tile since the "atomic" op is really just a | ||
67 | * read/modify/write, and can race with the non-atomic | ||
68 | * read/modify/write. However, if we can short-circuit the write when | ||
69 | * it is not needed, in the atomic case, we avoid the race. | ||
70 | */ | ||
71 | |||
72 | #include <linux/linkage.h> | ||
73 | #include <asm/atomic.h> | ||
74 | #include <asm/page.h> | ||
75 | #include <asm/processor.h> | ||
76 | |||
77 | .section .text.atomic,"ax" | ||
78 | ENTRY(__start_atomic_asm_code) | ||
79 | |||
80 | .macro atomic_op, name, bitwidth, body | ||
81 | .align 64 | ||
82 | STD_ENTRY_SECTION(__atomic\name, .text.atomic) | ||
83 | { | ||
84 | movei r24, 1 | ||
85 | j 4f /* branch to second cache line */ | ||
86 | } | ||
87 | 1: { | ||
88 | .ifc \bitwidth,16 | ||
89 | lh r22, r0 | ||
90 | .else | ||
91 | lw r22, r0 | ||
92 | addi r23, r0, 4 | ||
93 | .endif | ||
94 | } | ||
95 | .ifc \bitwidth,64 | ||
96 | lw r23, r23 | ||
97 | .endif | ||
98 | \body /* set r24, and r25 if 64-bit */ | ||
99 | { | ||
100 | seq r26, r22, r24 | ||
101 | seq r27, r23, r25 | ||
102 | } | ||
103 | .ifc \bitwidth,64 | ||
104 | bbnst r27, 2f | ||
105 | .endif | ||
106 | bbs r26, 3f /* skip write-back if it's the same value */ | ||
107 | 2: { | ||
108 | .ifc \bitwidth,16 | ||
109 | sh r0, r24 | ||
110 | .else | ||
111 | sw r0, r24 | ||
112 | addi r23, r0, 4 | ||
113 | .endif | ||
114 | } | ||
115 | .ifc \bitwidth,64 | ||
116 | sw r23, r25 | ||
117 | .endif | ||
118 | mf | ||
119 | 3: { | ||
120 | move r0, r22 | ||
121 | .ifc \bitwidth,64 | ||
122 | move r1, r23 | ||
123 | .else | ||
124 | move r1, zero | ||
125 | .endif | ||
126 | sw ATOMIC_LOCK_REG_NAME, zero | ||
127 | } | ||
128 | mtspr INTERRUPT_CRITICAL_SECTION, zero | ||
129 | jrp lr | ||
130 | 4: { | ||
131 | move ATOMIC_LOCK_REG_NAME, r1 | ||
132 | mtspr INTERRUPT_CRITICAL_SECTION, r24 | ||
133 | } | ||
134 | #ifndef CONFIG_SMP | ||
135 | j 1b /* no atomic locks */ | ||
136 | #else | ||
137 | { | ||
138 | tns r21, ATOMIC_LOCK_REG_NAME | ||
139 | moveli r23, 2048 /* maximum backoff time in cycles */ | ||
140 | } | ||
141 | { | ||
142 | bzt r21, 1b /* branch if lock acquired */ | ||
143 | moveli r25, 32 /* starting backoff time in cycles */ | ||
144 | } | ||
145 | 5: mtspr INTERRUPT_CRITICAL_SECTION, zero | ||
146 | mfspr r26, CYCLE_LOW /* get start point for this backoff */ | ||
147 | 6: mfspr r22, CYCLE_LOW /* test to see if we've backed off enough */ | ||
148 | sub r22, r22, r26 | ||
149 | slt r22, r22, r25 | ||
150 | bbst r22, 6b | ||
151 | { | ||
152 | mtspr INTERRUPT_CRITICAL_SECTION, r24 | ||
153 | shli r25, r25, 1 /* double the backoff; retry the tns */ | ||
154 | } | ||
155 | { | ||
156 | tns r21, ATOMIC_LOCK_REG_NAME | ||
157 | slt r26, r23, r25 /* is the proposed backoff too big? */ | ||
158 | } | ||
159 | { | ||
160 | bzt r21, 1b /* branch if lock acquired */ | ||
161 | mvnz r25, r26, r23 | ||
162 | } | ||
163 | j 5b | ||
164 | #endif | ||
165 | STD_ENDPROC(__atomic\name) | ||
166 | .ifc \bitwidth,32 | ||
167 | .pushsection __ex_table,"a" | ||
168 | .word 1b, __atomic\name | ||
169 | .word 2b, __atomic\name | ||
170 | .word __atomic\name, __atomic_bad_address | ||
171 | .popsection | ||
172 | .endif | ||
173 | .endm | ||
174 | |||
175 | atomic_op _cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }" | ||
176 | atomic_op _xchg, 32, "move r24, r2" | ||
177 | atomic_op _xchg_add, 32, "add r24, r22, r2" | ||
178 | atomic_op _xchg_add_unless, 32, \ | ||
179 | "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }" | ||
180 | atomic_op _or, 32, "or r24, r22, r2" | ||
181 | atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2" | ||
182 | atomic_op _xor, 32, "xor r24, r22, r2" | ||
183 | |||
184 | atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \ | ||
185 | { bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }" | ||
186 | atomic_op 64_xchg, 64, "{ move r24, r2; move r25, r3 }" | ||
187 | atomic_op 64_xchg_add, 64, "{ add r24, r22, r2; add r25, r23, r3 }; \ | ||
188 | slt_u r26, r24, r22; add r25, r25, r26" | ||
189 | atomic_op 64_xchg_add_unless, 64, \ | ||
190 | "{ sne r26, r22, r2; sne r27, r23, r3 }; \ | ||
191 | { bbns r26, 3f; add r24, r22, r4 }; \ | ||
192 | { bbns r27, 3f; add r25, r23, r5 }; \ | ||
193 | slt_u r26, r24, r22; add r25, r25, r26" | ||
194 | |||
195 | jrp lr /* happy backtracer */ | ||
196 | |||
197 | ENTRY(__end_atomic_asm_code) | ||
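As a reading aid, the SMP lock-acquisition path above (labels 4: through 6:) behaves roughly like the C sketch below. This is an illustrative restatement, not kernel code: __insn_tns() atomically fetches the old lock word and stores 1, get_cycles() stands in for the CYCLE_LOW reads, and the real code additionally drops INTERRUPT_CRITICAL_SECTION while it spins.

	/* Illustrative restatement of the tns/backoff loop above. */
	static void example_acquire(volatile int *lock)
	{
		int backoff = 32;			/* starting backoff, in cycles */

		while (__insn_tns(lock) != 0) {		/* tns: fetch old value, store 1 */
			cycles_t start = get_cycles();
			while (get_cycles() - start < backoff)
				;			/* spin outside the lock */
			backoff *= 2;			/* exponential backoff ... */
			if (backoff > 2048)		/* ... capped at 2048 cycles */
				backoff = 2048;
		}
	}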
diff --git a/arch/tile/lib/checksum.c b/arch/tile/lib/checksum.c new file mode 100644 index 000000000000..e4bab5bd3f31 --- /dev/null +++ b/arch/tile/lib/checksum.c | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * Support code for the main lib/checksum.c. | ||
14 | */ | ||
15 | |||
16 | #include <net/checksum.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
19 | static inline unsigned int longto16(unsigned long x) | ||
20 | { | ||
21 | unsigned long ret; | ||
22 | #ifdef __tilegx__ | ||
23 | ret = __insn_v2sadu(x, 0); | ||
24 | ret = __insn_v2sadu(ret, 0); | ||
25 | #else | ||
26 | ret = __insn_sadh_u(x, 0); | ||
27 | ret = __insn_sadh_u(ret, 0); | ||
28 | #endif | ||
29 | return ret; | ||
30 | } | ||
31 | |||
32 | __wsum do_csum(const unsigned char *buff, int len) | ||
33 | { | ||
34 | int odd, count; | ||
35 | unsigned long result = 0; | ||
36 | |||
37 | if (len <= 0) | ||
38 | goto out; | ||
39 | odd = 1 & (unsigned long) buff; | ||
40 | if (odd) { | ||
41 | result = (*buff << 8); | ||
42 | len--; | ||
43 | buff++; | ||
44 | } | ||
45 | count = len >> 1; /* nr of 16-bit words.. */ | ||
46 | if (count) { | ||
47 | if (2 & (unsigned long) buff) { | ||
48 | result += *(const unsigned short *)buff; | ||
49 | count--; | ||
50 | len -= 2; | ||
51 | buff += 2; | ||
52 | } | ||
53 | count >>= 1; /* nr of 32-bit words.. */ | ||
54 | if (count) { | ||
55 | #ifdef __tilegx__ | ||
56 | if (4 & (unsigned long) buff) { | ||
57 | unsigned int w = *(const unsigned int *)buff; | ||
58 | result = __insn_v2sadau(result, w, 0); | ||
59 | count--; | ||
60 | len -= 4; | ||
61 | buff += 4; | ||
62 | } | ||
63 | count >>= 1; /* nr of 64-bit words.. */ | ||
64 | #endif | ||
65 | |||
66 | /* | ||
67 | * This algorithm could wrap around for very | ||
68 | * large buffers, but those should be impossible. | ||
69 | */ | ||
70 | BUG_ON(count >= 65530); | ||
71 | |||
72 | while (count) { | ||
73 | unsigned long w = *(const unsigned long *)buff; | ||
74 | count--; | ||
75 | buff += sizeof(w); | ||
76 | #ifdef __tilegx__ | ||
77 | result = __insn_v2sadau(result, w, 0); | ||
78 | #else | ||
79 | result = __insn_sadah_u(result, w, 0); | ||
80 | #endif | ||
81 | } | ||
82 | #ifdef __tilegx__ | ||
83 | if (len & 4) { | ||
84 | unsigned int w = *(const unsigned int *)buff; | ||
85 | result = __insn_v2sadau(result, w, 0); | ||
86 | buff += 4; | ||
87 | } | ||
88 | #endif | ||
89 | } | ||
90 | if (len & 2) { | ||
91 | result += *(const unsigned short *) buff; | ||
92 | buff += 2; | ||
93 | } | ||
94 | } | ||
95 | if (len & 1) | ||
96 | result += *buff; | ||
97 | result = longto16(result); | ||
98 | if (odd) | ||
99 | result = swab16(result); | ||
100 | out: | ||
101 | return result; | ||
102 | } | ||
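For clarity, longto16() above uses the sum-of-absolute-differences instructions to fold the running checksum down to 16 bits. A portable equivalent for the 32-bit (TILEPro) case, shown only to illustrate the intent and not used by this code, is the usual fold-with-carry:

	/* Illustration only: fold a 32-bit partial sum to 16 bits, as
	 * longto16() does via two sadh_u instructions on TILEPro.
	 */
	static inline unsigned int fold_to_16(unsigned long sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the high halfword in */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb any carry out */
		return sum;
	}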
diff --git a/arch/tile/lib/cpumask.c b/arch/tile/lib/cpumask.c new file mode 100644 index 000000000000..af745b3b2559 --- /dev/null +++ b/arch/tile/lib/cpumask.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/cpumask.h> | ||
16 | #include <linux/ctype.h> | ||
17 | #include <linux/errno.h> | ||
18 | |||
19 | /* | ||
20 | * Allow cropping out bits beyond the end of the array. | ||
21 | * Move to "lib" directory if more clients want to use this routine. | ||
22 | */ | ||
23 | int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits) | ||
24 | { | ||
25 | unsigned a, b; | ||
26 | |||
27 | bitmap_zero(maskp, nmaskbits); | ||
28 | do { | ||
29 | if (!isdigit(*bp)) | ||
30 | return -EINVAL; | ||
31 | a = simple_strtoul(bp, (char **)&bp, 10); | ||
32 | b = a; | ||
33 | if (*bp == '-') { | ||
34 | bp++; | ||
35 | if (!isdigit(*bp)) | ||
36 | return -EINVAL; | ||
37 | b = simple_strtoul(bp, (char **)&bp, 10); | ||
38 | } | ||
39 | if (!(a <= b)) | ||
40 | return -EINVAL; | ||
41 | if (b >= nmaskbits) | ||
42 | b = nmaskbits-1; | ||
43 | while (a <= b) { | ||
44 | set_bit(a, maskp); | ||
45 | a++; | ||
46 | } | ||
47 | if (*bp == ',') | ||
48 | bp++; | ||
49 | } while (*bp != '\0' && *bp != '\n'); | ||
50 | return 0; | ||
51 | } | ||
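A usage sketch for bitmap_parselist_crop() (hypothetical values, illustration only): bits beyond the end of the array are silently cropped rather than causing the whole parse to fail.

	DECLARE_BITMAP(mask, 64);
	int rc = bitmap_parselist_crop("0-2,5,60-70", mask, 64);
	/* rc == 0; bits 0-2, 5, and 60-63 are set; 64-70 are cropped. */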
diff --git a/arch/tile/lib/delay.c b/arch/tile/lib/delay.c new file mode 100644 index 000000000000..5801b03c13ef --- /dev/null +++ b/arch/tile/lib/delay.c | |||
@@ -0,0 +1,34 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/thread_info.h> | ||
18 | #include <asm/fixmap.h> | ||
19 | #include <hv/hypervisor.h> | ||
20 | |||
21 | void __udelay(unsigned long usecs) | ||
22 | { | ||
23 | hv_nanosleep(usecs * 1000); | ||
24 | } | ||
25 | EXPORT_SYMBOL(__udelay); | ||
26 | |||
27 | void __ndelay(unsigned long nsecs) | ||
28 | { | ||
29 | hv_nanosleep(nsecs); | ||
30 | } | ||
31 | EXPORT_SYMBOL(__ndelay); | ||
32 | |||
33 | /* FIXME: should be declared in a header somewhere. */ | ||
34 | EXPORT_SYMBOL(__delay); | ||
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c new file mode 100644 index 000000000000..af8e70e2a0ce --- /dev/null +++ b/arch/tile/lib/exports.c | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Exports from assembler code and from libtile-cc. | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | |||
19 | /* arch/tile/lib/usercopy.S */ | ||
20 | #include <linux/uaccess.h> | ||
21 | EXPORT_SYMBOL(__get_user_1); | ||
22 | EXPORT_SYMBOL(__get_user_2); | ||
23 | EXPORT_SYMBOL(__get_user_4); | ||
24 | EXPORT_SYMBOL(__put_user_1); | ||
25 | EXPORT_SYMBOL(__put_user_2); | ||
26 | EXPORT_SYMBOL(__put_user_4); | ||
27 | EXPORT_SYMBOL(__put_user_8); | ||
28 | EXPORT_SYMBOL(strnlen_user_asm); | ||
29 | EXPORT_SYMBOL(strncpy_from_user_asm); | ||
30 | EXPORT_SYMBOL(clear_user_asm); | ||
31 | |||
32 | /* arch/tile/kernel/entry.S */ | ||
33 | #include <linux/kernel.h> | ||
34 | #include <asm/processor.h> | ||
35 | EXPORT_SYMBOL(current_text_addr); | ||
36 | EXPORT_SYMBOL(dump_stack); | ||
37 | |||
38 | /* arch/tile/lib/__memcpy.S */ | ||
39 | /* NOTE: on TILE64, these symbols appear in arch/tile/lib/memcpy_tile64.c */ | ||
40 | EXPORT_SYMBOL(memcpy); | ||
41 | EXPORT_SYMBOL(__copy_to_user_inatomic); | ||
42 | EXPORT_SYMBOL(__copy_from_user_inatomic); | ||
43 | EXPORT_SYMBOL(__copy_from_user_zeroing); | ||
44 | |||
45 | /* hypervisor glue */ | ||
46 | #include <hv/hypervisor.h> | ||
47 | EXPORT_SYMBOL(hv_dev_open); | ||
48 | EXPORT_SYMBOL(hv_dev_pread); | ||
49 | EXPORT_SYMBOL(hv_dev_pwrite); | ||
50 | EXPORT_SYMBOL(hv_dev_close); | ||
51 | |||
52 | /* -ltile-cc */ | ||
53 | uint32_t __udivsi3(uint32_t dividend, uint32_t divisor); | ||
54 | EXPORT_SYMBOL(__udivsi3); | ||
55 | int32_t __divsi3(int32_t dividend, int32_t divisor); | ||
56 | EXPORT_SYMBOL(__divsi3); | ||
57 | uint64_t __udivdi3(uint64_t dividend, uint64_t divisor); | ||
58 | EXPORT_SYMBOL(__udivdi3); | ||
59 | int64_t __divdi3(int64_t dividend, int64_t divisor); | ||
60 | EXPORT_SYMBOL(__divdi3); | ||
61 | uint32_t __umodsi3(uint32_t dividend, uint32_t divisor); | ||
62 | EXPORT_SYMBOL(__umodsi3); | ||
63 | int32_t __modsi3(int32_t dividend, int32_t divisor); | ||
64 | EXPORT_SYMBOL(__modsi3); | ||
65 | uint64_t __umoddi3(uint64_t dividend, uint64_t divisor); | ||
66 | EXPORT_SYMBOL(__umoddi3); | ||
67 | int64_t __moddi3(int64_t dividend, int64_t divisor); | ||
68 | EXPORT_SYMBOL(__moddi3); | ||
69 | #ifndef __tilegx__ | ||
70 | uint64_t __ll_mul(uint64_t n0, uint64_t n1); | ||
71 | EXPORT_SYMBOL(__ll_mul); | ||
72 | #endif | ||
73 | #ifndef __tilegx__ | ||
74 | int64_t __muldi3(int64_t, int64_t); | ||
75 | EXPORT_SYMBOL(__muldi3); | ||
76 | uint64_t __lshrdi3(uint64_t, unsigned int); | ||
77 | EXPORT_SYMBOL(__lshrdi3); | ||
78 | #endif | ||
diff --git a/arch/tile/lib/mb_incoherent.S b/arch/tile/lib/mb_incoherent.S new file mode 100644 index 000000000000..989ad7b68d5a --- /dev/null +++ b/arch/tile/lib/mb_incoherent.S | |||
@@ -0,0 +1,34 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Assembly code for invoking the HV's fence_incoherent syscall. | ||
15 | */ | ||
16 | |||
17 | #include <linux/linkage.h> | ||
18 | #include <hv/syscall_public.h> | ||
19 | #include <arch/abi.h> | ||
20 | #include <arch/chip.h> | ||
21 | |||
22 | #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() | ||
23 | |||
24 | /* | ||
25 | * Invoke the hypervisor's fence_incoherent syscall, which guarantees | ||
26 | * that all victims for cachelines homed on this tile have reached memory. | ||
27 | */ | ||
28 | STD_ENTRY(__mb_incoherent) | ||
29 | moveli TREG_SYSCALL_NR_NAME, HV_SYS_fence_incoherent | ||
30 | swint2 | ||
31 | jrp lr | ||
32 | STD_ENDPROC(__mb_incoherent) | ||
33 | |||
34 | #endif | ||
diff --git a/arch/tile/lib/memchr_32.c b/arch/tile/lib/memchr_32.c new file mode 100644 index 000000000000..6235283b4859 --- /dev/null +++ b/arch/tile/lib/memchr_32.c | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
19 | void *memchr(const void *s, int c, size_t n) | ||
20 | { | ||
21 | /* Get an aligned pointer. */ | ||
22 | const uintptr_t s_int = (uintptr_t) s; | ||
23 | const uint32_t *p = (const uint32_t *)(s_int & -4); | ||
24 | |||
25 | /* Create four copies of the byte for which we are looking. */ | ||
26 | const uint32_t goal = 0x01010101 * (uint8_t) c; | ||
27 | |||
28 | /* Read the first word, but munge it so that bytes before the array | ||
29 | * will not match goal. | ||
30 | * | ||
31 | * Note that this shift count expression works because we know | ||
32 | * shift counts are taken mod 32. | ||
33 | */ | ||
34 | const uint32_t before_mask = (1 << (s_int << 3)) - 1; | ||
35 | uint32_t v = (*p | before_mask) ^ (goal & before_mask); | ||
36 | |||
37 | /* Compute the address of the last byte. */ | ||
38 | const char *const last_byte_ptr = (const char *)s + n - 1; | ||
39 | |||
40 | /* Compute the address of the word containing the last byte. */ | ||
41 | const uint32_t *const last_word_ptr = | ||
42 | (const uint32_t *)((uintptr_t) last_byte_ptr & -4); | ||
43 | |||
44 | uint32_t bits; | ||
45 | char *ret; | ||
46 | |||
47 | if (__builtin_expect(n == 0, 0)) { | ||
48 | /* Don't dereference any memory if the array is empty. */ | ||
49 | return NULL; | ||
50 | } | ||
51 | |||
52 | while ((bits = __insn_seqb(v, goal)) == 0) { | ||
53 | if (__builtin_expect(p == last_word_ptr, 0)) { | ||
54 | /* We already read the last word in the array, | ||
55 | * so give up. | ||
56 | */ | ||
57 | return NULL; | ||
58 | } | ||
59 | v = *++p; | ||
60 | } | ||
61 | |||
62 | /* We found a match, but it might be in a byte past the end | ||
63 | * of the array. | ||
64 | */ | ||
65 | ret = ((char *)p) + (__insn_ctz(bits) >> 3); | ||
66 | return (ret <= last_byte_ptr) ? ret : NULL; | ||
67 | } | ||
68 | EXPORT_SYMBOL(memchr); | ||
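A worked example of the before_mask trick above (illustration only, not additional kernel code): suppose s_int & 3 == 2 and we are searching for c == 'x' (0x78). Shift counts are taken mod 32, so (s_int << 3) behaves as 16 here.

	uint32_t goal        = 0x78787878;	/* 0x01010101 * 0x78 */
	uint32_t before_mask = (1 << 16) - 1;	/* == 0x0000ffff */
	/* v = (*p | before_mask) ^ (goal & before_mask): each of the two
	 * low-order (pre-array) bytes becomes 0xff ^ 0x78 == 0x87, which
	 * can never compare equal to the goal byte 0x78, so __insn_seqb()
	 * cannot report a spurious match before the start of the array.
	 */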
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S new file mode 100644 index 000000000000..f92984bf60ec --- /dev/null +++ b/arch/tile/lib/memcpy_32.S | |||
@@ -0,0 +1,628 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This file shares the implementation of the userspace memcpy and | ||
15 | * the kernel's memcpy, copy_to_user and copy_from_user. | ||
16 | */ | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | #if CHIP_HAS_WH64() || defined(MEMCPY_TEST_WH64) | ||
21 | #define MEMCPY_USE_WH64 | ||
22 | #endif | ||
23 | |||
24 | |||
25 | #include <linux/linkage.h> | ||
26 | |||
27 | /* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */ | ||
28 | #if !CHIP_HAS_COHERENT_LOCAL_CACHE() | ||
29 | #define memcpy __memcpy_asm | ||
30 | #define __copy_to_user_inatomic __copy_to_user_inatomic_asm | ||
31 | #define __copy_from_user_inatomic __copy_from_user_inatomic_asm | ||
32 | #define __copy_from_user_zeroing __copy_from_user_zeroing_asm | ||
33 | #endif | ||
34 | |||
35 | #define IS_MEMCPY 0 | ||
36 | #define IS_COPY_FROM_USER 1 | ||
37 | #define IS_COPY_FROM_USER_ZEROING 2 | ||
38 | #define IS_COPY_TO_USER -1 | ||
39 | |||
40 | .section .text.memcpy_common, "ax" | ||
41 | .align 64 | ||
42 | |||
43 | /* Use this to preface each bundle that can cause an exception so | ||
44 | * the kernel can clean up properly. The special cleanup code should | ||
45 | * not use these, since it knows what it is doing. | ||
46 | */ | ||
47 | #define EX \ | ||
48 | .pushsection __ex_table, "a"; \ | ||
49 | .word 9f, memcpy_common_fixup; \ | ||
50 | .popsection; \ | ||
51 | 9 | ||
52 | |||
53 | |||
54 | /* __copy_from_user_inatomic takes the kernel target address in r0, | ||
55 | * the user source in r1, and the bytes to copy in r2. | ||
56 | * It returns the number of uncopiable bytes (hopefully zero) in r0. | ||
57 | */ | ||
58 | ENTRY(__copy_from_user_inatomic) | ||
59 | .type __copy_from_user_inatomic, @function | ||
60 | FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \ | ||
61 | .text.memcpy_common, \ | ||
62 | .Lend_memcpy_common - __copy_from_user_inatomic) | ||
63 | { movei r29, IS_COPY_FROM_USER; j memcpy_common } | ||
64 | .size __copy_from_user_inatomic, . - __copy_from_user_inatomic | ||
65 | |||
66 | /* __copy_from_user_zeroing is like __copy_from_user_inatomic, but | ||
67 | * any uncopiable bytes are zeroed in the target. | ||
68 | */ | ||
69 | ENTRY(__copy_from_user_zeroing) | ||
70 | .type __copy_from_user_zeroing, @function | ||
71 | FEEDBACK_REENTER(__copy_from_user_inatomic) | ||
72 | { movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common } | ||
73 | .size __copy_from_user_zeroing, . - __copy_from_user_zeroing | ||
74 | |||
75 | /* __copy_to_user_inatomic takes the user target address in r0, | ||
76 | * the kernel source in r1, and the bytes to copy in r2. | ||
77 | * It returns the number of uncopiable bytes (hopefully zero) in r0. | ||
78 | */ | ||
79 | ENTRY(__copy_to_user_inatomic) | ||
80 | .type __copy_to_user_inatomic, @function | ||
81 | FEEDBACK_REENTER(__copy_from_user_inatomic) | ||
82 | { movei r29, IS_COPY_TO_USER; j memcpy_common } | ||
83 | .size __copy_to_user_inatomic, . - __copy_to_user_inatomic | ||
84 | |||
85 | ENTRY(memcpy) | ||
86 | .type memcpy, @function | ||
87 | FEEDBACK_REENTER(__copy_from_user_inatomic) | ||
88 | { movei r29, IS_MEMCPY } | ||
89 | .size memcpy, . - memcpy | ||
90 | /* Fall through */ | ||
91 | |||
92 | .type memcpy_common, @function | ||
93 | memcpy_common: | ||
94 | /* On entry, r29 holds one of the IS_* macro values from above. */ | ||
95 | |||
96 | |||
97 | /* r0 is the dest, r1 is the source, r2 is the size. */ | ||
98 | |||
99 | /* Save aside original dest so we can return it at the end. */ | ||
100 | { sw sp, lr; move r23, r0; or r4, r0, r1 } | ||
101 | |||
102 | /* Check for an empty size. */ | ||
103 | { bz r2, .Ldone; andi r4, r4, 3 } | ||
104 | |||
105 | /* Save aside original values in case of a fault. */ | ||
106 | { move r24, r1; move r25, r2 } | ||
107 | move r27, lr | ||
108 | |||
109 | /* Check for an unaligned source or dest. */ | ||
110 | { bnz r4, .Lcopy_unaligned_maybe_many; addli r4, r2, -256 } | ||
111 | |||
112 | .Lcheck_aligned_copy_size: | ||
113 | /* If we are copying < 256 bytes, branch to simple case. */ | ||
114 | { blzt r4, .Lcopy_8_check; slti_u r8, r2, 8 } | ||
115 | |||
116 | /* Copying >= 256 bytes, so jump to complex prefetching loop. */ | ||
117 | { andi r6, r1, 63; j .Lcopy_many } | ||
118 | |||
119 | /* | ||
120 | * | ||
121 | * Aligned 4 byte at a time copy loop | ||
122 | * | ||
123 | */ | ||
124 | |||
125 | .Lcopy_8_loop: | ||
126 | /* Copy two words at a time to hide load latency. */ | ||
127 | EX: { lw r3, r1; addi r1, r1, 4; slti_u r8, r2, 16 } | ||
128 | EX: { lw r4, r1; addi r1, r1, 4 } | ||
129 | EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 } | ||
130 | EX: { sw r0, r4; addi r0, r0, 4; addi r2, r2, -4 } | ||
131 | .Lcopy_8_check: | ||
132 | { bzt r8, .Lcopy_8_loop; slti_u r4, r2, 4 } | ||
133 | |||
134 | /* Copy odd leftover word, if any. */ | ||
135 | { bnzt r4, .Lcheck_odd_stragglers } | ||
136 | EX: { lw r3, r1; addi r1, r1, 4 } | ||
137 | EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 } | ||
138 | |||
139 | .Lcheck_odd_stragglers: | ||
140 | { bnz r2, .Lcopy_unaligned_few } | ||
141 | |||
142 | .Ldone: | ||
143 | /* For memcpy return original dest address, else zero. */ | ||
144 | { mz r0, r29, r23; jrp lr } | ||
145 | |||
146 | |||
147 | /* | ||
148 | * | ||
149 | * Prefetching multiple cache line copy handler (for large transfers). | ||
150 | * | ||
151 | */ | ||
152 | |||
153 | /* Copy words until r1 is cache-line-aligned. */ | ||
154 | .Lalign_loop: | ||
155 | EX: { lw r3, r1; addi r1, r1, 4 } | ||
156 | { andi r6, r1, 63 } | ||
157 | EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 } | ||
158 | .Lcopy_many: | ||
159 | { bnzt r6, .Lalign_loop; addi r9, r0, 63 } | ||
160 | |||
161 | { addi r3, r1, 60; andi r9, r9, -64 } | ||
162 | |||
163 | #ifdef MEMCPY_USE_WH64 | ||
164 | /* No need to prefetch dst, we'll just do the wh64 | ||
165 | * right before we copy a line. | ||
166 | */ | ||
167 | #endif | ||
168 | |||
169 | EX: { lw r5, r3; addi r3, r3, 64; movei r4, 1 } | ||
170 | /* Intentionally stall for a few cycles to leave L2 cache alone. */ | ||
171 | { bnzt zero, .; move r27, lr } | ||
172 | EX: { lw r6, r3; addi r3, r3, 64 } | ||
173 | /* Intentionally stall for a few cycles to leave L2 cache alone. */ | ||
174 | { bnzt zero, . } | ||
175 | EX: { lw r7, r3; addi r3, r3, 64 } | ||
176 | #ifndef MEMCPY_USE_WH64 | ||
177 | /* Prefetch the dest */ | ||
178 | /* Intentionally stall for a few cycles to leave L2 cache alone. */ | ||
179 | { bnzt zero, . } | ||
180 | /* Use a real load to cause a TLB miss if necessary. We aren't using | ||
181 | * r28, so this should be fine. | ||
182 | */ | ||
183 | EX: { lw r28, r9; addi r9, r9, 64 } | ||
184 | /* Intentionally stall for a few cycles to leave L2 cache alone. */ | ||
185 | { bnzt zero, . } | ||
186 | { prefetch r9; addi r9, r9, 64 } | ||
187 | /* Intentionally stall for a few cycles to leave L2 cache alone. */ | ||
188 | { bnzt zero, . } | ||
189 | { prefetch r9; addi r9, r9, 64 } | ||
190 | #endif | ||
191 | /* Intentionally stall for a few cycles to leave L2 cache alone. */ | ||
192 | { bz zero, .Lbig_loop2 } | ||
193 | |||
194 | /* On entry to this loop: | ||
195 | * - r0 points to the start of dst line 0 | ||
196 | * - r1 points to start of src line 0 | ||
197 | * - r2 >= (256 - 60), only the first time the loop trips. | ||
198 | * - r3 contains r1 + 128 + 60 [pointer to end of source line 2] | ||
199 | * This is our prefetch address. When we get near the end | ||
200 | * rather than prefetching off the end this is changed to point | ||
201 | * to some "safe" recently loaded address. | ||
202 | * - r5 contains *(r1 + 60) [i.e. last word of source line 0] | ||
203 | * - r6 contains *(r1 + 64 + 60) [i.e. last word of source line 1] | ||
204 | * - r9 contains ((r0 + 63) & -64) | ||
205 | * [start of next dst cache line.] | ||
206 | */ | ||
207 | |||
208 | .Lbig_loop: | ||
209 | { jal .Lcopy_line2; add r15, r1, r2 } | ||
210 | |||
211 | .Lbig_loop2: | ||
212 | /* Copy line 0, first stalling until r5 is ready. */ | ||
213 | EX: { move r12, r5; lw r16, r1 } | ||
214 | { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 } | ||
215 | /* Prefetch several lines ahead. */ | ||
216 | EX: { lw r5, r3; addi r3, r3, 64 } | ||
217 | { jal .Lcopy_line } | ||
218 | |||
219 | /* Copy line 1, first stalling until r6 is ready. */ | ||
220 | EX: { move r12, r6; lw r16, r1 } | ||
221 | { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 } | ||
222 | /* Prefetch several lines ahead. */ | ||
223 | EX: { lw r6, r3; addi r3, r3, 64 } | ||
224 | { jal .Lcopy_line } | ||
225 | |||
226 | /* Copy line 2, first stalling until r7 is ready. */ | ||
227 | EX: { move r12, r7; lw r16, r1 } | ||
228 | { bz r4, .Lcopy_8_check; slti_u r8, r2, 8 } | ||
229 | /* Prefetch several lines ahead. */ | ||
230 | EX: { lw r7, r3; addi r3, r3, 64 } | ||
231 | /* Use up a caches-busy cycle by jumping back to the top of the | ||
232 | * loop. Might as well get it out of the way now. | ||
233 | */ | ||
234 | { j .Lbig_loop } | ||
235 | |||
236 | |||
237 | /* On entry: | ||
238 | * - r0 points to the destination line. | ||
239 | * - r1 points to the source line. | ||
240 | * - r3 is the next prefetch address. | ||
241 | * - r9 holds the last address used for wh64. | ||
242 | * - r12 = WORD_15 | ||
243 | * - r16 = WORD_0. | ||
244 | * - r17 == r1 + 16. | ||
245 | * - r27 holds saved lr to restore. | ||
246 | * | ||
247 | * On exit: | ||
248 | * - r0 is incremented by 64. | ||
249 | * - r1 is incremented by 64, unless that would point to a word | ||
250 | * beyond the end of the source array, in which case it is redirected | ||
251 | * to point to an arbitrary word already in the cache. | ||
252 | * - r2 is decremented by 64. | ||
253 | * - r3 is unchanged, unless it points to a word beyond the | ||
254 | * end of the source array, in which case it is redirected | ||
255 | * to point to an arbitrary word already in the cache. | ||
256 | * Redirecting is OK since if we are that close to the end | ||
257 | * of the array we will not come back to this subroutine | ||
258 | * and use the contents of the prefetched address. | ||
259 | * - r4 is nonzero iff r2 >= 64. | ||
260 | * - r9 is incremented by 64, unless it points beyond the | ||
261 | * end of the last full destination cache line, in which | ||
262 | * case it is redirected to a "safe address" that can be | ||
263 | * clobbered (sp - 64) | ||
264 | * - lr contains the value in r27. | ||
265 | */ | ||
266 | |||
267 | /* r26 unused */ | ||
268 | |||
269 | .Lcopy_line: | ||
270 | /* TODO: when r3 goes past the end, we would like to redirect it | ||
271 | * to prefetch the last partial cache line (if any) just once, for the | ||
272 | * benefit of the final cleanup loop. But we don't want to | ||
273 | * prefetch that line more than once, or subsequent prefetches | ||
274 | * will go into the RTF. But then .Lbig_loop should unconditionally | ||
275 | * branch to top of loop to execute final prefetch, and its | ||
276 | * nop should become a conditional branch. | ||
277 | */ | ||
278 | |||
279 | /* We need two non-memory cycles here to cover the resources | ||
280 | * used by the loads initiated by the caller. | ||
281 | */ | ||
282 | { add r15, r1, r2 } | ||
283 | .Lcopy_line2: | ||
284 | { slt_u r13, r3, r15; addi r17, r1, 16 } | ||
285 | |||
286 | /* NOTE: this will stall for one cycle as L1 is busy. */ | ||
287 | |||
288 | /* Fill second L1D line. */ | ||
289 | EX: { lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */ | ||
290 | |||
291 | #ifdef MEMCPY_TEST_WH64 | ||
292 | /* Issue a fake wh64 that clobbers the destination words | ||
293 | * with random garbage, for testing. | ||
294 | */ | ||
295 | { movei r19, 64; crc32_32 r10, r2, r9 } | ||
296 | .Lwh64_test_loop: | ||
297 | EX: { sw r9, r10; addi r9, r9, 4; addi r19, r19, -4 } | ||
298 | { bnzt r19, .Lwh64_test_loop; crc32_32 r10, r10, r19 } | ||
299 | #elif CHIP_HAS_WH64() | ||
300 | /* Prepare destination line for writing. */ | ||
301 | EX: { wh64 r9; addi r9, r9, 64 } | ||
302 | #else | ||
303 | /* Prefetch dest line */ | ||
304 | { prefetch r9; addi r9, r9, 64 } | ||
305 | #endif | ||
306 | /* Load seven words that are L1D hits to cover wh64 L2 usage. */ | ||
307 | |||
308 | /* Load the three remaining words from the last L1D line, which | ||
309 | * we know has already filled the L1D. | ||
310 | */ | ||
311 | EX: { lw r4, r1; addi r1, r1, 4; addi r20, r1, 16 } /* r4 = WORD_12 */ | ||
312 | EX: { lw r8, r1; addi r1, r1, 4; slt_u r13, r20, r15 }/* r8 = WORD_13 */ | ||
313 | EX: { lw r11, r1; addi r1, r1, -52; mvz r20, r13, r1 } /* r11 = WORD_14 */ | ||
314 | |||
315 | /* Load the three remaining words from the first L1D line, first | ||
316 | * stalling until it has filled by "looking at" r16. | ||
317 | */ | ||
318 | EX: { lw r13, r1; addi r1, r1, 4; move zero, r16 } /* r13 = WORD_1 */ | ||
319 | EX: { lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_2 */ | ||
320 | EX: { lw r15, r1; addi r1, r1, 8; addi r10, r0, 60 } /* r15 = WORD_3 */ | ||
321 | |||
322 | /* Load second word from the second L1D line, first | ||
323 | * stalling until it has filled by "looking at" r17. | ||
324 | */ | ||
325 | EX: { lw r19, r1; addi r1, r1, 4; move zero, r17 } /* r19 = WORD_5 */ | ||
326 | |||
327 | /* Store last word to the destination line, potentially dirtying it | ||
328 | * for the first time, which keeps the L2 busy for two cycles. | ||
329 | */ | ||
330 | EX: { sw r10, r12 } /* store(WORD_15) */ | ||
331 | |||
332 | /* Use two L1D hits to cover the sw L2 access above. */ | ||
333 | EX: { lw r10, r1; addi r1, r1, 4 } /* r10 = WORD_6 */ | ||
334 | EX: { lw r12, r1; addi r1, r1, 4 } /* r12 = WORD_7 */ | ||
335 | |||
336 | /* Fill third L1D line. */ | ||
337 | EX: { lw r18, r1; addi r1, r1, 4 } /* r18 = WORD_8 */ | ||
338 | |||
339 | /* Store first L1D line. */ | ||
340 | EX: { sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */ | ||
341 | EX: { sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */ | ||
342 | EX: { sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */ | ||
343 | #ifdef MEMCPY_USE_WH64 | ||
344 | EX: { sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */ | ||
345 | #else | ||
346 | /* Back up r9 to a cache line we are already storing to | ||
347 | * if it gets past the end of the dest vector. Strictly speaking, | ||
348 | * we don't need to back up to the start of a cache line, but it's free | ||
349 | * and tidy, so why not? | ||
350 | */ | ||
351 | EX: { sw r0, r15; addi r0, r0, 4; andi r13, r0, -64 } /* store(WORD_3) */ | ||
352 | #endif | ||
353 | /* Store second L1D line. */ | ||
354 | EX: { sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */ | ||
355 | EX: { sw r0, r19; addi r0, r0, 4 } /* store(WORD_5) */ | ||
356 | EX: { sw r0, r10; addi r0, r0, 4 } /* store(WORD_6) */ | ||
357 | EX: { sw r0, r12; addi r0, r0, 4 } /* store(WORD_7) */ | ||
358 | |||
359 | EX: { lw r13, r1; addi r1, r1, 4; move zero, r18 } /* r13 = WORD_9 */ | ||
360 | EX: { lw r14, r1; addi r1, r1, 4 } /* r14 = WORD_10 */ | ||
361 | EX: { lw r15, r1; move r1, r20 } /* r15 = WORD_11 */ | ||
362 | |||
363 | /* Store third L1D line. */ | ||
364 | EX: { sw r0, r18; addi r0, r0, 4 } /* store(WORD_8) */ | ||
365 | EX: { sw r0, r13; addi r0, r0, 4 } /* store(WORD_9) */ | ||
366 | EX: { sw r0, r14; addi r0, r0, 4 } /* store(WORD_10) */ | ||
367 | EX: { sw r0, r15; addi r0, r0, 4 } /* store(WORD_11) */ | ||
368 | |||
369 | /* Store rest of fourth L1D line. */ | ||
370 | EX: { sw r0, r4; addi r0, r0, 4 } /* store(WORD_12) */ | ||
371 | { | ||
372 | EX: sw r0, r8 /* store(WORD_13) */ | ||
373 | addi r0, r0, 4 | ||
374 | /* Will r2 be > 64 after we subtract 64 below? */ | ||
375 | shri r4, r2, 7 | ||
376 | } | ||
377 | { | ||
378 | EX: sw r0, r11 /* store(WORD_14) */ | ||
379 | addi r0, r0, 8 | ||
380 | /* Record 64 bytes successfully copied. */ | ||
381 | addi r2, r2, -64 | ||
382 | } | ||
383 | |||
384 | { jrp lr; move lr, r27 } | ||
385 | |||
386 | /* Convey to the backtrace library that the stack frame is size | ||
387 | * zero, and the real return address is on the stack rather than | ||
388 | * in 'lr'. | ||
389 | */ | ||
390 | { info 8 } | ||
391 | |||
392 | .align 64 | ||
393 | .Lcopy_unaligned_maybe_many: | ||
394 | /* Skip the setup overhead if we aren't copying many bytes. */ | ||
395 | { slti_u r8, r2, 20; sub r4, zero, r0 } | ||
396 | { bnzt r8, .Lcopy_unaligned_few; andi r4, r4, 3 } | ||
397 | { bz r4, .Ldest_is_word_aligned; add r18, r1, r2 } | ||
398 | |||
399 | /* | ||
400 | * | ||
401 | * unaligned 4 byte at a time copy handler. | ||
402 | * | ||
403 | */ | ||
404 | |||
405 | /* Copy single bytes until r0 == 0 mod 4, so we can store words. */ | ||
406 | .Lalign_dest_loop: | ||
407 | EX: { lb_u r3, r1; addi r1, r1, 1; addi r4, r4, -1 } | ||
408 | EX: { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 } | ||
409 | { bnzt r4, .Lalign_dest_loop; andi r3, r1, 3 } | ||
410 | |||
411 | /* If source and dest are now *both* aligned, do an aligned copy. */ | ||
412 | { bz r3, .Lcheck_aligned_copy_size; addli r4, r2, -256 } | ||
413 | |||
414 | .Ldest_is_word_aligned: | ||
415 | |||
416 | #if CHIP_HAS_DWORD_ALIGN() | ||
417 | EX: { andi r8, r0, 63; lwadd_na r6, r1, 4} | ||
418 | { slti_u r9, r2, 64; bz r8, .Ldest_is_L2_line_aligned } | ||
419 | |||
420 | /* This copies unaligned words until either there are fewer | ||
421 | * than 4 bytes left to copy, or until the destination pointer | ||
422 | * is cache-aligned, whichever comes first. | ||
423 | * | ||
424 | * On entry: | ||
425 | * - r0 is the next store address. | ||
426 | * - r1 points 4 bytes past the load address corresponding to r0. | ||
427 | * - r2 >= 4 | ||
428 | * - r6 is the next aligned word loaded. | ||
429 | */ | ||
430 | .Lcopy_unaligned_src_words: | ||
431 | EX: { lwadd_na r7, r1, 4; slti_u r8, r2, 4 + 4 } | ||
432 | /* stall */ | ||
433 | { dword_align r6, r7, r1; slti_u r9, r2, 64 + 4 } | ||
434 | EX: { swadd r0, r6, 4; addi r2, r2, -4 } | ||
435 | { bnz r8, .Lcleanup_unaligned_words; andi r8, r0, 63 } | ||
436 | { bnzt r8, .Lcopy_unaligned_src_words; move r6, r7 } | ||
437 | |||
438 | /* On entry: | ||
439 | * - r0 is the next store address. | ||
440 | * - r1 points 4 bytes past the load address corresponding to r0. | ||
441 | * - r2 >= 4 (# of bytes left to store). | ||
442 | * - r6 is the next aligned src word value. | ||
443 | * - r9 = (r2 < 64U). | ||
444 | * - r18 points one byte past the end of source memory. | ||
445 | */ | ||
446 | .Ldest_is_L2_line_aligned: | ||
447 | |||
448 | { | ||
449 | /* Not a full cache line remains. */ | ||
450 | bnz r9, .Lcleanup_unaligned_words | ||
451 | move r7, r6 | ||
452 | } | ||
453 | |||
454 | /* r2 >= 64 */ | ||
455 | |||
456 | /* Kick off two prefetches, but don't go past the end. */ | ||
457 | { addi r3, r1, 63 - 4; addi r8, r1, 64 + 63 - 4 } | ||
458 | { prefetch r3; move r3, r8; slt_u r8, r8, r18 } | ||
459 | { mvz r3, r8, r1; addi r8, r3, 64 } | ||
460 | { prefetch r3; move r3, r8; slt_u r8, r8, r18 } | ||
461 | { mvz r3, r8, r1; movei r17, 0 } | ||
462 | |||
463 | .Lcopy_unaligned_line: | ||
464 | /* Prefetch another line. */ | ||
465 | { prefetch r3; addi r15, r1, 60; addi r3, r3, 64 } | ||
466 | /* Fire off a load of the last word we are about to copy. */ | ||
467 | EX: { lw_na r15, r15; slt_u r8, r3, r18 } | ||
468 | |||
469 | EX: { mvz r3, r8, r1; wh64 r0 } | ||
470 | |||
471 | /* This loop runs twice. | ||
472 | * | ||
473 | * On entry: | ||
474 | * - r17 is even before the first iteration, and odd before | ||
475 | * the second. It is incremented inside the loop. Encountering | ||
476 | * an even value at the end of the loop makes it stop. | ||
477 | */ | ||
478 | .Lcopy_half_an_unaligned_line: | ||
479 | EX: { | ||
480 | /* Stall until the last byte is ready. In the steady state this | ||
481 | * guarantees all words to load below will be in the L2 cache, which | ||
482 | * avoids shunting the loads to the RTF. | ||
483 | */ | ||
484 | move zero, r15 | ||
485 | lwadd_na r7, r1, 16 | ||
486 | } | ||
487 | EX: { lwadd_na r11, r1, 12 } | ||
488 | EX: { lwadd_na r14, r1, -24 } | ||
489 | EX: { lwadd_na r8, r1, 4 } | ||
490 | EX: { lwadd_na r9, r1, 4 } | ||
491 | EX: { | ||
492 | lwadd_na r10, r1, 8 | ||
493 | /* r16 = (r2 < 64), after we subtract 32 from r2 below. */ | ||
494 | slti_u r16, r2, 64 + 32 | ||
495 | } | ||
496 | EX: { lwadd_na r12, r1, 4; addi r17, r17, 1 } | ||
497 | EX: { lwadd_na r13, r1, 8; dword_align r6, r7, r1 } | ||
498 | EX: { swadd r0, r6, 4; dword_align r7, r8, r1 } | ||
499 | EX: { swadd r0, r7, 4; dword_align r8, r9, r1 } | ||
500 | EX: { swadd r0, r8, 4; dword_align r9, r10, r1 } | ||
501 | EX: { swadd r0, r9, 4; dword_align r10, r11, r1 } | ||
502 | EX: { swadd r0, r10, 4; dword_align r11, r12, r1 } | ||
503 | EX: { swadd r0, r11, 4; dword_align r12, r13, r1 } | ||
504 | EX: { swadd r0, r12, 4; dword_align r13, r14, r1 } | ||
505 | EX: { swadd r0, r13, 4; addi r2, r2, -32 } | ||
506 | { move r6, r14; bbst r17, .Lcopy_half_an_unaligned_line } | ||
507 | |||
508 | { bzt r16, .Lcopy_unaligned_line; move r7, r6 } | ||
509 | |||
510 | /* On entry: | ||
511 | * - r0 is the next store address. | ||
512 | * - r1 points 4 bytes past the load address corresponding to r0. | ||
513 | * - r2 >= 0 (# of bytes left to store). | ||
514 | * - r7 is the next aligned src word value. | ||
515 | */ | ||
516 | .Lcleanup_unaligned_words: | ||
517 | /* Handle any trailing bytes. */ | ||
518 | { bz r2, .Lcopy_unaligned_done; slti_u r8, r2, 4 } | ||
519 | { bzt r8, .Lcopy_unaligned_src_words; move r6, r7 } | ||
520 | |||
521 | /* Move r1 back to the point where it corresponds to r0. */ | ||
522 | { addi r1, r1, -4 } | ||
523 | |||
524 | #else /* !CHIP_HAS_DWORD_ALIGN() */ | ||
525 | |||
526 | /* Compute right/left shift counts and load initial source words. */ | ||
527 | { andi r5, r1, -4; andi r3, r1, 3 } | ||
528 | EX: { lw r6, r5; addi r5, r5, 4; shli r3, r3, 3 } | ||
529 | EX: { lw r7, r5; addi r5, r5, 4; sub r4, zero, r3 } | ||
530 | |||
531 | /* Load and store one word at a time, using shifts and ORs | ||
532 | * to correct for the misaligned src. | ||
533 | */ | ||
534 | .Lcopy_unaligned_src_loop: | ||
535 | { shr r6, r6, r3; shl r8, r7, r4 } | ||
536 | EX: { lw r7, r5; or r8, r8, r6; move r6, r7 } | ||
537 | EX: { sw r0, r8; addi r0, r0, 4; addi r2, r2, -4 } | ||
538 | { addi r5, r5, 4; slti_u r8, r2, 8 } | ||
539 | { bzt r8, .Lcopy_unaligned_src_loop; addi r1, r1, 4 } | ||
540 | |||
541 | { bz r2, .Lcopy_unaligned_done } | ||
542 | #endif /* !CHIP_HAS_DWORD_ALIGN() */ | ||
543 | |||
544 | /* Fall through */ | ||
545 | |||
546 | /* | ||
547 | * | ||
548 | * 1 byte at a time copy handler. | ||
549 | * | ||
550 | */ | ||
551 | |||
552 | .Lcopy_unaligned_few: | ||
553 | EX: { lb_u r3, r1; addi r1, r1, 1 } | ||
554 | EX: { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 } | ||
555 | { bnzt r2, .Lcopy_unaligned_few } | ||
556 | |||
557 | .Lcopy_unaligned_done: | ||
558 | |||
559 | /* For memcpy return original dest address, else zero. */ | ||
560 | { mz r0, r29, r23; jrp lr } | ||
561 | |||
562 | .Lend_memcpy_common: | ||
563 | .size memcpy_common, .Lend_memcpy_common - memcpy_common | ||
564 | |||
565 | .section .fixup,"ax" | ||
566 | memcpy_common_fixup: | ||
567 | .type memcpy_common_fixup, @function | ||
568 | |||
569 | /* Skip any bytes we already successfully copied. | ||
570 | * r2 (num remaining) is correct, but r0 (dst) and r1 (src) | ||
571 | * may not be quite right because of unrolling and prefetching. | ||
572 | * So we need to recompute their values as the address just | ||
573 | * after the last byte we are sure was successfully loaded and | ||
574 | * then stored. | ||
575 | */ | ||
576 | |||
577 | /* Determine how many bytes we successfully copied. */ | ||
578 | { sub r3, r25, r2 } | ||
579 | |||
580 | /* Add this to the original r0 and r1 to get their new values. */ | ||
581 | { add r0, r23, r3; add r1, r24, r3 } | ||
582 | |||
583 | { bzt r29, memcpy_fixup_loop } | ||
584 | { blzt r29, copy_to_user_fixup_loop } | ||
585 | |||
586 | copy_from_user_fixup_loop: | ||
587 | /* Try copying the rest one byte at a time, expecting a load fault. */ | ||
588 | .Lcfu: { lb_u r3, r1; addi r1, r1, 1 } | ||
589 | { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 } | ||
590 | { bnzt r2, copy_from_user_fixup_loop } | ||
591 | |||
592 | .Lcopy_from_user_fixup_zero_remainder: | ||
593 | { bbs r29, 2f } /* low bit set means IS_COPY_FROM_USER */ | ||
594 | /* byte-at-a-time loop faulted, so zero the rest. */ | ||
595 | { move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ } | ||
596 | 1: { sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 } | ||
597 | { bnzt r3, 1b } | ||
598 | 2: move lr, r27 | ||
599 | { move r0, r2; jrp lr } | ||
600 | |||
601 | copy_to_user_fixup_loop: | ||
602 | /* Try copying the rest one byte at a time, expecting a store fault. */ | ||
603 | { lb_u r3, r1; addi r1, r1, 1 } | ||
604 | .Lctu: { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 } | ||
605 | { bnzt r2, copy_to_user_fixup_loop } | ||
606 | .Lcopy_to_user_fixup_done: | ||
607 | move lr, r27 | ||
608 | { move r0, r2; jrp lr } | ||
609 | |||
610 | memcpy_fixup_loop: | ||
611 | /* Try copying the rest one byte at a time. We expect a disastrous | ||
612 | * fault to happen since we are in fixup code, but let it happen. | ||
613 | */ | ||
614 | { lb_u r3, r1; addi r1, r1, 1 } | ||
615 | { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 } | ||
616 | { bnzt r2, memcpy_fixup_loop } | ||
617 | /* This should be unreachable, we should have faulted again. | ||
618 | * But be paranoid and handle it in case some interrupt changed | ||
619 | * the TLB or something. | ||
620 | */ | ||
621 | move lr, r27 | ||
622 | { move r0, r23; jrp lr } | ||
623 | |||
624 | .size memcpy_common_fixup, . - memcpy_common_fixup | ||
625 | |||
626 | .section __ex_table,"a" | ||
627 | .word .Lcfu, .Lcopy_from_user_fixup_zero_remainder | ||
628 | .word .Lctu, .Lcopy_to_user_fixup_done | ||
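The fixup path above can be restated in C as follows (explanatory sketch only, not code that exists in the tree): work out how many bytes were definitely copied, rewind the pointers to that point, and retry byte-at-a-time so that any further fault lands on a precisely known instruction.

	/* Illustrative restatement of memcpy_common_fixup; not kernel code. */
	static unsigned long example_fixup(unsigned char *dst, const unsigned char *src,
					   unsigned long orig_len, unsigned long remaining)
	{
		unsigned long copied = orig_len - remaining;	/* r25 - r2 */

		dst += copied;					/* recompute r0 */
		src += copied;					/* recompute r1 */
		while (remaining) {				/* retry one byte at a time */
			*dst++ = *src++;			/* either access may fault */
			remaining--;
		}
		return remaining;				/* bytes left uncopied */
	}

For the user-copy variants, a second fault in this loop is routed by the __ex_table entries above: the copy-from-user zeroing variant clears the rest of the destination, and in all cases the count of bytes not copied is returned in r0.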
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c new file mode 100644 index 000000000000..4f0047342469 --- /dev/null +++ b/arch/tile/lib/memcpy_tile64.c | |||
@@ -0,0 +1,271 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/string.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | #include <asm/fixmap.h> | ||
20 | #include <asm/kmap_types.h> | ||
21 | #include <asm/tlbflush.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | #include <arch/chip.h> | ||
24 | |||
25 | |||
26 | #if !CHIP_HAS_COHERENT_LOCAL_CACHE() | ||
27 | |||
28 | /* Defined in memcpy.S */ | ||
29 | extern unsigned long __memcpy_asm(void *to, const void *from, unsigned long n); | ||
30 | extern unsigned long __copy_to_user_inatomic_asm( | ||
31 | void __user *to, const void *from, unsigned long n); | ||
32 | extern unsigned long __copy_from_user_inatomic_asm( | ||
33 | void *to, const void __user *from, unsigned long n); | ||
34 | extern unsigned long __copy_from_user_zeroing_asm( | ||
35 | void *to, const void __user *from, unsigned long n); | ||
36 | |||
37 | typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long); | ||
38 | |||
39 | /* Size above which to consider TLB games for performance */ | ||
40 | #define LARGE_COPY_CUTOFF 2048 | ||
41 | |||
42 | /* Communicate to the simulator what we are trying to do. */ | ||
43 | #define sim_allow_multiple_caching(b) \ | ||
44 | __insn_mtspr(SPR_SIM_CONTROL, \ | ||
45 | SIM_CONTROL_ALLOW_MULTIPLE_CACHING | ((b) << _SIM_CONTROL_OPERATOR_BITS)) | ||
46 | |||
47 | /* | ||
48 | * Copy memory by briefly enabling incoherent cacheline-at-a-time mode. | ||
49 | * | ||
50 | * We set up our own source and destination PTEs that we fully control. | ||
51 | * This is the only way to guarantee that we don't race with another | ||
52 | * thread that is modifying the PTE; we can't afford to try the | ||
53 | * copy_{to,from}_user() technique of catching the interrupt, since | ||
54 | * we must run with interrupts disabled to avoid the risk of some | ||
55 | * other code seeing the incoherent data in our cache. (Recall that | ||
56 | * our cache is indexed by PA, so even if the other code doesn't use | ||
57 | * our KM_MEMCPY virtual addresses, they'll still hit in cache using | ||
58 | * the normal VAs that aren't supposed to hit in cache.) | ||
59 | */ | ||
60 | static void memcpy_multicache(void *dest, const void *source, | ||
61 | pte_t dst_pte, pte_t src_pte, int len) | ||
62 | { | ||
63 | int idx, i; | ||
64 | unsigned long flags, newsrc, newdst, endsrc; | ||
65 | pmd_t *pmdp; | ||
66 | pte_t *ptep; | ||
67 | int cpu = get_cpu(); | ||
68 | |||
69 | /* | ||
70 | * Disable interrupts so that we don't recurse into memcpy() | ||
71 | * in an interrupt handler, nor accidentally reference | ||
72 | * the PA of the source from an interrupt routine. Also | ||
73 | * notify the simulator that we're playing games so we don't | ||
74 | * generate spurious coherency warnings. | ||
75 | */ | ||
76 | local_irq_save(flags); | ||
77 | sim_allow_multiple_caching(1); | ||
78 | |||
79 | /* Set up the new dest mapping */ | ||
80 | idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0; | ||
81 | newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1)); | ||
82 | pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst); | ||
83 | ptep = pte_offset_kernel(pmdp, newdst); | ||
84 | if (pte_val(*ptep) != pte_val(dst_pte)) { | ||
85 | set_pte(ptep, dst_pte); | ||
86 | local_flush_tlb_page(NULL, newdst, PAGE_SIZE); | ||
87 | } | ||
88 | |||
89 | /* Set up the new source mapping */ | ||
90 | idx += (KM_MEMCPY0 - KM_MEMCPY1); | ||
91 | src_pte = hv_pte_set_nc(src_pte); | ||
92 | src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */ | ||
93 | newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1)); | ||
94 | pmdp = pmd_offset(pud_offset(pgd_offset_k(newsrc), newsrc), newsrc); | ||
95 | ptep = pte_offset_kernel(pmdp, newsrc); | ||
96 | *ptep = src_pte; /* set_pte() would be confused by this */ | ||
97 | local_flush_tlb_page(NULL, newsrc, PAGE_SIZE); | ||
98 | |||
99 | /* Actually move the data. */ | ||
100 | __memcpy_asm((void *)newdst, (const void *)newsrc, len); | ||
101 | |||
102 | /* | ||
103 | * Remap the source as locally-cached and not OLOC'ed so that | ||
104 | * we can inval without also invaling the remote cpu's cache. | ||
105 | * This also avoids known errata with inv'ing cacheable oloc data. | ||
106 | */ | ||
107 | src_pte = hv_pte_set_mode(src_pte, HV_PTE_MODE_CACHE_NO_L3); | ||
108 | src_pte = hv_pte_set_writable(src_pte); /* need write access for inv */ | ||
109 | *ptep = src_pte; /* set_pte() would be confused by this */ | ||
110 | local_flush_tlb_page(NULL, newsrc, PAGE_SIZE); | ||
111 | |||
112 | /* | ||
113 | * Do the actual invalidation, covering the full L2 cache line | ||
114 | * at the end since __memcpy_asm() is somewhat aggressive. | ||
115 | */ | ||
116 | __inv_buffer((void *)newsrc, len); | ||
117 | |||
118 | /* | ||
119 | * We're done: notify the simulator that all is back to normal, | ||
120 | * and re-enable interrupts and pre-emption. | ||
121 | */ | ||
122 | sim_allow_multiple_caching(0); | ||
123 | local_irq_restore(flags); | ||
124 | put_cpu_no_resched(); | ||
125 | } | ||
126 | |||
127 | /* | ||
128 | * Identify large copies from remotely-cached memory, and copy them | ||
129 | * via memcpy_multicache() if they look good, otherwise fall back | ||
130 | * to the particular kind of copying passed as the memcpy_t function. | ||
131 | */ | ||
132 | static unsigned long fast_copy(void *dest, const void *source, int len, | ||
133 | memcpy_t func) | ||
134 | { | ||
135 | /* | ||
136 | * Check if it's big enough to bother with. We may end up doing a | ||
137 | * small copy via TLB manipulation if we're near a page boundary, | ||
138 | * but presumably we'll make it up when we hit the second page. | ||
139 | */ | ||
140 | while (len >= LARGE_COPY_CUTOFF) { | ||
141 | int copy_size, bytes_left_on_page; | ||
142 | pte_t *src_ptep, *dst_ptep; | ||
143 | pte_t src_pte, dst_pte; | ||
144 | struct page *src_page, *dst_page; | ||
145 | |||
146 | /* Is the source page oloc'ed to a remote cpu? */ | ||
147 | retry_source: | ||
148 | src_ptep = virt_to_pte(current->mm, (unsigned long)source); | ||
149 | if (src_ptep == NULL) | ||
150 | break; | ||
151 | src_pte = *src_ptep; | ||
152 | if (!hv_pte_get_present(src_pte) || | ||
153 | !hv_pte_get_readable(src_pte) || | ||
154 | hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3) | ||
155 | break; | ||
156 | if (get_remote_cache_cpu(src_pte) == smp_processor_id()) | ||
157 | break; | ||
158 | src_page = pfn_to_page(hv_pte_get_pfn(src_pte)); | ||
159 | get_page(src_page); | ||
160 | if (pte_val(src_pte) != pte_val(*src_ptep)) { | ||
161 | put_page(src_page); | ||
162 | goto retry_source; | ||
163 | } | ||
164 | if (pte_huge(src_pte)) { | ||
165 | /* Adjust the PTE to correspond to a small page */ | ||
166 | int pfn = hv_pte_get_pfn(src_pte); | ||
167 | pfn += (((unsigned long)source & (HPAGE_SIZE-1)) | ||
168 | >> PAGE_SHIFT); | ||
169 | src_pte = pfn_pte(pfn, src_pte); | ||
170 | src_pte = pte_mksmall(src_pte); | ||
171 | } | ||
172 | |||
173 | /* Is the destination page writable? */ | ||
174 | retry_dest: | ||
175 | dst_ptep = virt_to_pte(current->mm, (unsigned long)dest); | ||
176 | if (dst_ptep == NULL) { | ||
177 | put_page(src_page); | ||
178 | break; | ||
179 | } | ||
180 | dst_pte = *dst_ptep; | ||
181 | if (!hv_pte_get_present(dst_pte) || | ||
182 | !hv_pte_get_writable(dst_pte)) { | ||
183 | put_page(src_page); | ||
184 | break; | ||
185 | } | ||
186 | dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte)); | ||
187 | if (dst_page == src_page) { | ||
188 | /* | ||
189 | * Source and dest are on the same page; this | ||
190 | * potentially exposes us to incoherence if any | ||
191 | * part of src and dest overlap on a cache line. | ||
192 | * Just give up rather than trying to be precise. | ||
193 | */ | ||
194 | put_page(src_page); | ||
195 | break; | ||
196 | } | ||
197 | get_page(dst_page); | ||
198 | if (pte_val(dst_pte) != pte_val(*dst_ptep)) { | ||
199 | put_page(dst_page); | ||
200 | goto retry_dest; | ||
201 | } | ||
202 | if (pte_huge(dst_pte)) { | ||
203 | /* Adjust the PTE to correspond to a small page */ | ||
204 | int pfn = hv_pte_get_pfn(dst_pte); | ||
205 | pfn += (((unsigned long)dest & (HPAGE_SIZE-1)) | ||
206 | >> PAGE_SHIFT); | ||
207 | dst_pte = pfn_pte(pfn, dst_pte); | ||
208 | dst_pte = pte_mksmall(dst_pte); | ||
209 | } | ||
210 | |||
211 | /* All looks good: create a cachable PTE and copy from it */ | ||
212 | copy_size = len; | ||
213 | bytes_left_on_page = | ||
214 | PAGE_SIZE - (((int)source) & (PAGE_SIZE-1)); | ||
215 | if (copy_size > bytes_left_on_page) | ||
216 | copy_size = bytes_left_on_page; | ||
217 | bytes_left_on_page = | ||
218 | PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1)); | ||
219 | if (copy_size > bytes_left_on_page) | ||
220 | copy_size = bytes_left_on_page; | ||
221 | memcpy_multicache(dest, source, dst_pte, src_pte, copy_size); | ||
222 | |||
223 | /* Release the pages */ | ||
224 | put_page(dst_page); | ||
225 | put_page(src_page); | ||
226 | |||
227 | /* Continue on the next page */ | ||
228 | dest += copy_size; | ||
229 | source += copy_size; | ||
230 | len -= copy_size; | ||
231 | } | ||
232 | |||
233 | return func(dest, source, len); | ||
234 | } | ||
235 | |||
236 | void *memcpy(void *to, const void *from, __kernel_size_t n) | ||
237 | { | ||
238 | if (n < LARGE_COPY_CUTOFF) | ||
239 | return (void *)__memcpy_asm(to, from, n); | ||
240 | else | ||
241 | return (void *)fast_copy(to, from, n, __memcpy_asm); | ||
242 | } | ||
243 | |||
244 | unsigned long __copy_to_user_inatomic(void __user *to, const void *from, | ||
245 | unsigned long n) | ||
246 | { | ||
247 | if (n < LARGE_COPY_CUTOFF) | ||
248 | return __copy_to_user_inatomic_asm(to, from, n); | ||
249 | else | ||
250 | return fast_copy(to, from, n, __copy_to_user_inatomic_asm); | ||
251 | } | ||
252 | |||
253 | unsigned long __copy_from_user_inatomic(void *to, const void __user *from, | ||
254 | unsigned long n) | ||
255 | { | ||
256 | if (n < LARGE_COPY_CUTOFF) | ||
257 | return __copy_from_user_inatomic_asm(to, from, n); | ||
258 | else | ||
259 | return fast_copy(to, from, n, __copy_from_user_inatomic_asm); | ||
260 | } | ||
261 | |||
262 | unsigned long __copy_from_user_zeroing(void *to, const void __user *from, | ||
263 | unsigned long n) | ||
264 | { | ||
265 | if (n < LARGE_COPY_CUTOFF) | ||
266 | return __copy_from_user_zeroing_asm(to, from, n); | ||
267 | else | ||
268 | return fast_copy(to, from, n, __copy_from_user_zeroing_asm); | ||
269 | } | ||
270 | |||
271 | #endif /* !CHIP_HAS_COHERENT_LOCAL_CACHE() */ | ||
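fast_copy() above walks a large copy page by page: each iteration clamps the chunk to whatever remains on both the source and the destination page before handing it to memcpy_multicache(). A standalone sketch of that clamping is below (plain C; the 4 KB page size is assumed purely for illustration and is not taken from the patch).

#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096UL         /* assumed for illustration only */

/* Clamp one iteration of a large copy so it stays within the current
 * source page and the current destination page. */
static size_t chunk_on_both_pages(const void *src, const void *dst, size_t len)
{
        size_t src_left = SKETCH_PAGE_SIZE -
                          ((uintptr_t)src & (SKETCH_PAGE_SIZE - 1));
        size_t dst_left = SKETCH_PAGE_SIZE -
                          ((uintptr_t)dst & (SKETCH_PAGE_SIZE - 1));
        size_t chunk = len;

        if (chunk > src_left)
                chunk = src_left;
        if (chunk > dst_left)
                chunk = dst_left;
        return chunk;
}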
diff --git a/arch/tile/lib/memmove_32.c b/arch/tile/lib/memmove_32.c new file mode 100644 index 000000000000..f09d8c4523ec --- /dev/null +++ b/arch/tile/lib/memmove_32.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
19 | void *memmove(void *dest, const void *src, size_t n) | ||
20 | { | ||
21 | if ((const char *)src >= (char *)dest + n | ||
22 | || (char *)dest >= (const char *)src + n) { | ||
23 | /* We found no overlap, so let memcpy do all the heavy | ||
24 | * lifting (prefetching, etc.) | ||
25 | */ | ||
26 | return memcpy(dest, src, n); | ||
27 | } | ||
28 | |||
29 | if (n != 0) { | ||
30 | const uint8_t *in; | ||
31 | uint8_t x; | ||
32 | uint8_t *out; | ||
33 | int stride; | ||
34 | |||
35 | if (src < dest) { | ||
36 | /* copy backwards */ | ||
37 | in = (const uint8_t *)src + n - 1; | ||
38 | out = (uint8_t *)dest + n - 1; | ||
39 | stride = -1; | ||
40 | } else { | ||
41 | /* copy forwards */ | ||
42 | in = (const uint8_t *)src; | ||
43 | out = (uint8_t *)dest; | ||
44 | stride = 1; | ||
45 | } | ||
46 | |||
47 | /* Manually software-pipeline this loop. */ | ||
48 | x = *in; | ||
49 | in += stride; | ||
50 | |||
51 | while (--n != 0) { | ||
52 | *out = x; | ||
53 | out += stride; | ||
54 | x = *in; | ||
55 | in += stride; | ||
56 | } | ||
57 | |||
58 | *out = x; | ||
59 | } | ||
60 | |||
61 | return dest; | ||
62 | } | ||
63 | EXPORT_SYMBOL(memmove); | ||
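The overlap test above decides the copy direction: when the destination starts inside the source at a higher address, the backward pass is what keeps bytes from being overwritten before they are read. A small usage example (ordinary userspace C, not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[8] = "abcdef";

        /* src < dest and the regions overlap, so memmove copies backwards;
         * a naive forward byte-at-a-time copy would smear 'a' everywhere. */
        memmove(buf + 1, buf, 6);
        printf("%s\n", buf);            /* prints "aabcdef" */
        return 0;
}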
diff --git a/arch/tile/lib/memset_32.c b/arch/tile/lib/memset_32.c new file mode 100644 index 000000000000..8593bc82398a --- /dev/null +++ b/arch/tile/lib/memset_32.c | |||
@@ -0,0 +1,274 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <arch/chip.h> | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/module.h> | ||
20 | |||
21 | |||
22 | void *memset(void *s, int c, size_t n) | ||
23 | { | ||
24 | uint32_t *out32; | ||
25 | int n32; | ||
26 | uint32_t v16, v32; | ||
27 | uint8_t *out8 = s; | ||
28 | #if !CHIP_HAS_WH64() | ||
29 | int ahead32; | ||
30 | #else | ||
31 | int to_align32; | ||
32 | #endif | ||
33 | |||
34 | /* Experimentation shows that a trivial tight loop is a win up until | ||
35 | * around a size of 20, where writing a word at a time starts to win. | ||
36 | */ | ||
37 | #define BYTE_CUTOFF 20 | ||
38 | |||
39 | #if BYTE_CUTOFF < 3 | ||
40 | /* This must be at least this big, or some code later | ||
41 | * on doesn't work. | ||
42 | */ | ||
43 | #error "BYTE_CUTOFF is too small" | ||
44 | #endif | ||
45 | |||
46 | if (n < BYTE_CUTOFF) { | ||
47 | /* Strangely, this turns out to be the tightest way to | ||
48 | * write this loop. | ||
49 | */ | ||
50 | if (n != 0) { | ||
51 | do { | ||
52 | /* Strangely, combining these into one line | ||
53 | * performs worse. | ||
54 | */ | ||
55 | *out8 = c; | ||
56 | out8++; | ||
57 | } while (--n != 0); | ||
58 | } | ||
59 | |||
60 | return s; | ||
61 | } | ||
62 | |||
63 | #if !CHIP_HAS_WH64() | ||
64 | /* Use a spare issue slot to start prefetching the first cache | ||
65 | * line early. This instruction is free as the store can be buried | ||
66 | * in otherwise idle issue slots doing ALU ops. | ||
67 | */ | ||
68 | __insn_prefetch(out8); | ||
69 | |||
70 | /* We prefetch the end so that a short memset that spans two cache | ||
71 | * lines gets some prefetching benefit. Again we believe this is free | ||
72 | * to issue. | ||
73 | */ | ||
74 | __insn_prefetch(&out8[n - 1]); | ||
75 | #endif /* !CHIP_HAS_WH64() */ | ||
76 | |||
77 | |||
78 | /* Align 'out8'. We know n >= 3 so this won't write past the end. */ | ||
79 | while (((uintptr_t) out8 & 3) != 0) { | ||
80 | *out8++ = c; | ||
81 | --n; | ||
82 | } | ||
83 | |||
84 | /* Align 'n'. */ | ||
85 | while (n & 3) | ||
86 | out8[--n] = c; | ||
87 | |||
88 | out32 = (uint32_t *) out8; | ||
89 | n32 = n >> 2; | ||
90 | |||
91 | /* Tile input byte out to 32 bits. */ | ||
92 | v16 = __insn_intlb(c, c); | ||
93 | v32 = __insn_intlh(v16, v16); | ||
94 | |||
95 | /* This must be at least 8 or the following loop doesn't work. */ | ||
96 | #define CACHE_LINE_SIZE_IN_WORDS (CHIP_L2_LINE_SIZE() / 4) | ||
97 | |||
98 | #if !CHIP_HAS_WH64() | ||
99 | |||
100 | ahead32 = CACHE_LINE_SIZE_IN_WORDS; | ||
101 | |||
102 | /* We already prefetched the first and last cache lines, so | ||
103 | * we only need to do more prefetching if we are storing | ||
104 | * to more than two cache lines. | ||
105 | */ | ||
106 | if (n32 > CACHE_LINE_SIZE_IN_WORDS * 2) { | ||
107 | int i; | ||
108 | |||
109 | /* Prefetch the next several cache lines. | ||
110 | * This is the setup code for the software-pipelined | ||
111 | * loop below. | ||
112 | */ | ||
113 | #define MAX_PREFETCH 5 | ||
114 | ahead32 = n32 & -CACHE_LINE_SIZE_IN_WORDS; | ||
115 | if (ahead32 > MAX_PREFETCH * CACHE_LINE_SIZE_IN_WORDS) | ||
116 | ahead32 = MAX_PREFETCH * CACHE_LINE_SIZE_IN_WORDS; | ||
117 | |||
118 | for (i = CACHE_LINE_SIZE_IN_WORDS; | ||
119 | i < ahead32; i += CACHE_LINE_SIZE_IN_WORDS) | ||
120 | __insn_prefetch(&out32[i]); | ||
121 | } | ||
122 | |||
123 | if (n32 > ahead32) { | ||
124 | while (1) { | ||
125 | int j; | ||
126 | |||
127 | /* Prefetch by reading one word several cache lines | ||
128 | * ahead. Since loads are non-blocking this will | ||
129 | * cause the full cache line to be read while we are | ||
130 | * finishing earlier cache lines. Using a store | ||
131 | * here causes microarchitectural performance | ||
132 | * problems where a victimizing store miss goes to | ||
133 | * the head of the retry FIFO and locks the pipe for | ||
134 | * a few cycles. So a few subsequent stores in this | ||
135 | * loop go into the retry FIFO, and then later | ||
136 | * stores see that other stores to the same cache line | ||
137 | * are already in the retry FIFO and themselves go | ||
138 | * into the retry FIFO, filling it up and grinding | ||
139 | * to a halt waiting for the original miss to be | ||
140 | * satisfied. | ||
141 | */ | ||
142 | __insn_prefetch(&out32[ahead32]); | ||
143 | |||
144 | #if 1 | ||
145 | #if CACHE_LINE_SIZE_IN_WORDS % 4 != 0 | ||
146 | #error "Unhandled CACHE_LINE_SIZE_IN_WORDS" | ||
147 | #endif | ||
148 | |||
149 | n32 -= CACHE_LINE_SIZE_IN_WORDS; | ||
150 | |||
151 | /* Save icache space by only partially unrolling | ||
152 | * this loop. | ||
153 | */ | ||
154 | for (j = CACHE_LINE_SIZE_IN_WORDS / 4; j > 0; j--) { | ||
155 | *out32++ = v32; | ||
156 | *out32++ = v32; | ||
157 | *out32++ = v32; | ||
158 | *out32++ = v32; | ||
159 | } | ||
160 | #else | ||
161 | /* Unfortunately, due to a code generator flaw this | ||
162 | * allocates a separate register for each of these | ||
163 | * stores, which requires a large number of spills, | ||
164 | * which makes this procedure enormously bigger | ||
165 | * (something like 70%) | ||
166 | */ | ||
167 | *out32++ = v32; | ||
168 | *out32++ = v32; | ||
169 | *out32++ = v32; | ||
170 | *out32++ = v32; | ||
171 | *out32++ = v32; | ||
172 | *out32++ = v32; | ||
173 | *out32++ = v32; | ||
174 | *out32++ = v32; | ||
175 | *out32++ = v32; | ||
176 | *out32++ = v32; | ||
177 | *out32++ = v32; | ||
178 | *out32++ = v32; | ||
179 | *out32++ = v32; | ||
180 | *out32++ = v32; | ||
181 | *out32++ = v32; | ||
182 | n32 -= 16; | ||
183 | #endif | ||
184 | |||
185 | /* To save compiled code size, reuse this loop even | ||
186 | * when we run out of prefetching to do by dropping | ||
187 | * ahead32 down. | ||
188 | */ | ||
189 | if (n32 <= ahead32) { | ||
190 | /* Not even a full cache line left, | ||
191 | * so stop now. | ||
192 | */ | ||
193 | if (n32 < CACHE_LINE_SIZE_IN_WORDS) | ||
194 | break; | ||
195 | |||
196 | /* Choose a small enough value that we don't | ||
197 | * prefetch past the end. There's no sense | ||
198 | * in touching cache lines we don't have to. | ||
199 | */ | ||
200 | ahead32 = CACHE_LINE_SIZE_IN_WORDS - 1; | ||
201 | } | ||
202 | } | ||
203 | } | ||
204 | |||
205 | #else /* CHIP_HAS_WH64() */ | ||
206 | |||
207 | /* Determine how many words we need to emit before the 'out32' | ||
208 | * pointer becomes aligned modulo the cache line size. | ||
209 | */ | ||
210 | to_align32 = | ||
211 | (-((uintptr_t)out32 >> 2)) & (CACHE_LINE_SIZE_IN_WORDS - 1); | ||
212 | |||
213 | /* Only bother aligning and using wh64 if there is at least | ||
214 | * one full cache line to process. This check also prevents | ||
215 | * overrunning the end of the buffer with alignment words. | ||
216 | */ | ||
217 | if (to_align32 <= n32 - CACHE_LINE_SIZE_IN_WORDS) { | ||
218 | int lines_left; | ||
219 | |||
220 | /* Align out32 mod the cache line size so we can use wh64. */ | ||
221 | n32 -= to_align32; | ||
222 | for (; to_align32 != 0; to_align32--) { | ||
223 | *out32 = v32; | ||
224 | out32++; | ||
225 | } | ||
226 | |||
227 | /* Use unsigned divide to turn this into a right shift. */ | ||
228 | lines_left = (unsigned)n32 / CACHE_LINE_SIZE_IN_WORDS; | ||
229 | |||
230 | do { | ||
231 | /* Only wh64 a few lines at a time, so we don't | ||
232 | * exceed the maximum number of victim lines. | ||
233 | */ | ||
234 | int x = ((lines_left < CHIP_MAX_OUTSTANDING_VICTIMS()) | ||
235 | ? lines_left | ||
236 | : CHIP_MAX_OUTSTANDING_VICTIMS()); | ||
237 | uint32_t *wh = out32; | ||
238 | int i = x; | ||
239 | int j; | ||
240 | |||
241 | lines_left -= x; | ||
242 | |||
243 | do { | ||
244 | __insn_wh64(wh); | ||
245 | wh += CACHE_LINE_SIZE_IN_WORDS; | ||
246 | } while (--i); | ||
247 | |||
248 | for (j = x * (CACHE_LINE_SIZE_IN_WORDS / 4); j != 0; j--) { | ||
249 | *out32++ = v32; | ||
250 | *out32++ = v32; | ||
251 | *out32++ = v32; | ||
252 | *out32++ = v32; | ||
253 | } | ||
254 | } while (lines_left != 0); | ||
255 | |||
256 | /* We processed all full lines above, so only this many | ||
257 | * words remain to be processed. | ||
258 | */ | ||
259 | n32 &= CACHE_LINE_SIZE_IN_WORDS - 1; | ||
260 | } | ||
261 | |||
262 | #endif /* CHIP_HAS_WH64() */ | ||
263 | |||
264 | /* Now handle any leftover values. */ | ||
265 | if (n32 != 0) { | ||
266 | do { | ||
267 | *out32 = v32; | ||
268 | out32++; | ||
269 | } while (--n32 != 0); | ||
270 | } | ||
271 | |||
272 | return s; | ||
273 | } | ||
274 | EXPORT_SYMBOL(memset); | ||
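The __insn_intlb/__insn_intlh pair above interleaves the fill byte into all four byte lanes of a word so the main loops can store 32 bits at a time. For reference only, a portable equivalent of that replication is a single multiply:

#include <stdint.h>

/* Spread the low byte of c into every byte lane of a 32-bit word,
 * e.g. 0xab becomes 0xabababab. */
static inline uint32_t replicate_byte(int c)
{
        return 0x01010101u * (uint8_t)c;
}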
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c new file mode 100644 index 000000000000..485e24d62c6b --- /dev/null +++ b/arch/tile/lib/spinlock_32.c | |||
@@ -0,0 +1,221 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <asm/processor.h> | ||
18 | |||
19 | #include "spinlock_common.h" | ||
20 | |||
21 | void arch_spin_lock(arch_spinlock_t *lock) | ||
22 | { | ||
23 | int my_ticket; | ||
24 | int iterations = 0; | ||
25 | int delta; | ||
26 | |||
27 | while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1) | ||
28 | delay_backoff(iterations++); | ||
29 | |||
30 | /* Increment the next ticket number, implicitly releasing tns lock. */ | ||
31 | lock->next_ticket = my_ticket + TICKET_QUANTUM; | ||
32 | |||
33 | /* Wait until it's our turn. */ | ||
34 | while ((delta = my_ticket - lock->current_ticket) != 0) | ||
35 | relax((128 / CYCLES_PER_RELAX_LOOP) * delta); | ||
36 | } | ||
37 | EXPORT_SYMBOL(arch_spin_lock); | ||
38 | |||
39 | int arch_spin_trylock(arch_spinlock_t *lock) | ||
40 | { | ||
41 | /* | ||
42 | * Grab a ticket; no need to retry if it's busy, we'll just | ||
43 | * treat that the same as "locked", since someone else | ||
44 | * will lock it momentarily anyway. | ||
45 | */ | ||
46 | int my_ticket = __insn_tns((void *)&lock->next_ticket); | ||
47 | |||
48 | if (my_ticket == lock->current_ticket) { | ||
49 | /* Not currently locked, so lock it by keeping this ticket. */ | ||
50 | lock->next_ticket = my_ticket + TICKET_QUANTUM; | ||
51 | /* Success! */ | ||
52 | return 1; | ||
53 | } | ||
54 | |||
55 | if (!(my_ticket & 1)) { | ||
56 | /* Release next_ticket. */ | ||
57 | lock->next_ticket = my_ticket; | ||
58 | } | ||
59 | |||
60 | return 0; | ||
61 | } | ||
62 | EXPORT_SYMBOL(arch_spin_trylock); | ||
63 | |||
64 | void arch_spin_unlock_wait(arch_spinlock_t *lock) | ||
65 | { | ||
66 | u32 iterations = 0; | ||
67 | while (arch_spin_is_locked(lock)) | ||
68 | delay_backoff(iterations++); | ||
69 | } | ||
70 | EXPORT_SYMBOL(arch_spin_unlock_wait); | ||
71 | |||
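arch_spin_lock() above is a ticket lock built from the tns (test-and-set) instruction: tns marks the word busy while a ticket is being taken, the low bit is reserved for that marker, and tickets advance by TICKET_QUANTUM. Stripped of the tns encoding, the same idea in generic C11 atomics looks roughly like the sketch below (illustrative only; zero-initialize the structure before use).

#include <stdatomic.h>

struct ticket_lock {
        atomic_uint next;       /* next ticket to hand out */
        atomic_uint current;    /* ticket currently being served */
};

static void ticket_lock(struct ticket_lock *l)
{
        unsigned me = atomic_fetch_add(&l->next, 1);

        while (atomic_load(&l->current) != me)
                ;       /* spin; the tile code backs off in proportion */
}

static void ticket_unlock(struct ticket_lock *l)
{
        atomic_fetch_add(&l->current, 1);
}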
72 | /* | ||
73 | * The low byte is always reserved to be the marker for a "tns" operation | ||
74 | * since the low bit is set to "1" by a tns. The next seven bits are | ||
75 | * zeroes. The next byte holds the "next" writer value, i.e. the ticket | ||
76 | * available for the next task that wants to write. The third byte holds | ||
77 | * the current writer value, i.e. the writer who holds the current ticket. | ||
78 | * If current == next == 0, there are no interested writers. | ||
79 | */ | ||
80 | #define WR_NEXT_SHIFT _WR_NEXT_SHIFT | ||
81 | #define WR_CURR_SHIFT _WR_CURR_SHIFT | ||
82 | #define WR_WIDTH _WR_WIDTH | ||
83 | #define WR_MASK ((1 << WR_WIDTH) - 1) | ||
84 | |||
85 | /* | ||
86 | * The last eight bits hold the active reader count. This has to be | ||
87 | * zero before a writer can start to write. | ||
88 | */ | ||
89 | #define RD_COUNT_SHIFT _RD_COUNT_SHIFT | ||
90 | #define RD_COUNT_WIDTH _RD_COUNT_WIDTH | ||
91 | #define RD_COUNT_MASK ((1 << RD_COUNT_WIDTH) - 1) | ||
92 | |||
93 | |||
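For reference, a sketch of how the packed rwlock word decodes into the fields described in the comment above, assuming the byte layout it gives (the real shifts and widths come from the _WR and _RD constants); illustrative only:

#include <stdint.h>

struct rwlock_fields {
        unsigned tns_busy;      /* bit 0: a tns update is in flight */
        unsigned next_writer;   /* bits 8..15: next writer ticket */
        unsigned curr_writer;   /* bits 16..23: ticket being served */
        unsigned readers;       /* bits 24..31: active reader count */
};

static struct rwlock_fields decode_rwlock(uint32_t val)
{
        struct rwlock_fields f = {
                .tns_busy    = val & 1,
                .next_writer = (val >> 8)  & 0xff,
                .curr_writer = (val >> 16) & 0xff,
                .readers     = (val >> 24) & 0xff,
        };
        return f;
}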
94 | /* Lock the word, spinning until there are no tns-ers. */ | ||
95 | static inline u32 get_rwlock(arch_rwlock_t *rwlock) | ||
96 | { | ||
97 | u32 iterations = 0; | ||
98 | for (;;) { | ||
99 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
100 | if (unlikely(val & 1)) { | ||
101 | delay_backoff(iterations++); | ||
102 | continue; | ||
103 | } | ||
104 | return val; | ||
105 | } | ||
106 | } | ||
107 | |||
108 | int arch_read_trylock_slow(arch_rwlock_t *rwlock) | ||
109 | { | ||
110 | u32 val = get_rwlock(rwlock); | ||
111 | int locked = (val << RD_COUNT_WIDTH) == 0; | ||
112 | rwlock->lock = val + (locked << RD_COUNT_SHIFT); | ||
113 | return locked; | ||
114 | } | ||
115 | EXPORT_SYMBOL(arch_read_trylock_slow); | ||
116 | |||
117 | void arch_read_unlock_slow(arch_rwlock_t *rwlock) | ||
118 | { | ||
119 | u32 val = get_rwlock(rwlock); | ||
120 | rwlock->lock = val - (1 << RD_COUNT_SHIFT); | ||
121 | } | ||
122 | EXPORT_SYMBOL(arch_read_unlock_slow); | ||
123 | |||
124 | void arch_write_unlock_slow(arch_rwlock_t *rwlock, u32 val) | ||
125 | { | ||
126 | u32 eq, mask = 1 << WR_CURR_SHIFT; | ||
127 | while (unlikely(val & 1)) { | ||
128 | /* Limited backoff since we are the highest-priority task. */ | ||
129 | relax(4); | ||
130 | val = __insn_tns((int *)&rwlock->lock); | ||
131 | } | ||
132 | val = __insn_addb(val, mask); | ||
133 | eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT)); | ||
134 | val = __insn_mz(eq & mask, val); | ||
135 | rwlock->lock = val; | ||
136 | } | ||
137 | EXPORT_SYMBOL(arch_write_unlock_slow); | ||
138 | |||
139 | /* | ||
140 | * We spin until everything but the reader bits (which are in the high | ||
141 | * part of the word) are zero, i.e. no active or waiting writers, no tns. | ||
142 | * | ||
143 | * ISSUE: This approach can permanently starve readers. A reader who sees | ||
144 | * a writer could instead take a ticket lock (just like a writer would), | ||
145 | * and atomically enter read mode (with 1 reader) when it gets the ticket. | ||
146 | * This way both readers and writers will always make forward progress | ||
147 | * in a finite time. | ||
148 | */ | ||
149 | void arch_read_lock_slow(arch_rwlock_t *rwlock, u32 val) | ||
150 | { | ||
151 | u32 iterations = 0; | ||
152 | do { | ||
153 | if (!(val & 1)) | ||
154 | rwlock->lock = val; | ||
155 | delay_backoff(iterations++); | ||
156 | val = __insn_tns((int *)&rwlock->lock); | ||
157 | } while ((val << RD_COUNT_WIDTH) != 0); | ||
158 | rwlock->lock = val + (1 << RD_COUNT_SHIFT); | ||
159 | } | ||
160 | EXPORT_SYMBOL(arch_read_lock_slow); | ||
161 | |||
162 | void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val) | ||
163 | { | ||
164 | /* | ||
165 | * The trailing underscore on this variable (and curr_ below) | ||
166 | * reminds us that the high bits are garbage; we mask them out | ||
167 | * when we compare them. | ||
168 | */ | ||
169 | u32 my_ticket_; | ||
170 | |||
171 | /* Take out the next ticket; this will also stop would-be readers. */ | ||
172 | if (val & 1) | ||
173 | val = get_rwlock(rwlock); | ||
174 | rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT); | ||
175 | |||
176 | /* Extract my ticket value from the original word. */ | ||
177 | my_ticket_ = val >> WR_NEXT_SHIFT; | ||
178 | |||
179 | /* | ||
180 | * Wait until the "current" field matches our ticket, and | ||
181 | * there are no remaining readers. | ||
182 | */ | ||
183 | for (;;) { | ||
184 | u32 curr_ = val >> WR_CURR_SHIFT; | ||
185 | u32 readers = val >> RD_COUNT_SHIFT; | ||
186 | u32 delta = ((my_ticket_ - curr_) & WR_MASK) + !!readers; | ||
187 | if (likely(delta == 0)) | ||
188 | break; | ||
189 | |||
190 | /* Delay based on how many lock-holders are still out there. */ | ||
191 | relax((256 / CYCLES_PER_RELAX_LOOP) * delta); | ||
192 | |||
193 | /* | ||
194 | * Get a non-tns value to check; we don't need to tns | ||
195 | * it ourselves. Since we're not tns'ing, we retry | ||
196 | * more rapidly to get a valid value. | ||
197 | */ | ||
198 | while ((val = rwlock->lock) & 1) | ||
199 | relax(4); | ||
200 | } | ||
201 | } | ||
202 | EXPORT_SYMBOL(arch_write_lock_slow); | ||
203 | |||
204 | int __tns_atomic_acquire(atomic_t *lock) | ||
205 | { | ||
206 | int ret; | ||
207 | u32 iterations = 0; | ||
208 | |||
209 | BUG_ON(__insn_mfspr(SPR_INTERRUPT_CRITICAL_SECTION)); | ||
210 | __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1); | ||
211 | |||
212 | while ((ret = __insn_tns((void *)&lock->counter)) == 1) | ||
213 | delay_backoff(iterations++); | ||
214 | return ret; | ||
215 | } | ||
216 | |||
217 | void __tns_atomic_release(atomic_t *p, int v) | ||
218 | { | ||
219 | p->counter = v; | ||
220 | __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); | ||
221 | } | ||
diff --git a/arch/tile/lib/spinlock_common.h b/arch/tile/lib/spinlock_common.h new file mode 100644 index 000000000000..8dffebde6630 --- /dev/null +++ b/arch/tile/lib/spinlock_common.h | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * This file is included into spinlock_32.c or _64.c. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * The mfspr in __spinlock_relax() is 5 or 6 cycles plus 2 for loop | ||
18 | * overhead. | ||
19 | */ | ||
20 | #ifdef __tilegx__ | ||
21 | #define CYCLES_PER_RELAX_LOOP 7 | ||
22 | #else | ||
23 | #define CYCLES_PER_RELAX_LOOP 8 | ||
24 | #endif | ||
25 | |||
26 | /* | ||
27 | * Idle the core for CYCLES_PER_RELAX_LOOP * iterations cycles. | ||
28 | */ | ||
29 | static inline void | ||
30 | relax(int iterations) | ||
31 | { | ||
32 | for (/*above*/; iterations > 0; iterations--) | ||
33 | __insn_mfspr(SPR_PASS); | ||
34 | barrier(); | ||
35 | } | ||
36 | |||
37 | /* Perform bounded exponential backoff. */ | ||
38 | void delay_backoff(int iterations) | ||
39 | { | ||
40 | u32 exponent, loops; | ||
41 | |||
42 | /* | ||
43 | * 2^exponent is how many times we go around the loop, | ||
44 | * which takes 8 cycles. We want to start with a 16- to 31-cycle | ||
45 | * loop, so we need to go around a minimum of 2 = 2^1 times, so we | ||
46 | * bias the original value up by 1. | ||
47 | */ | ||
48 | exponent = iterations + 1; | ||
49 | |||
50 | /* | ||
51 | * Don't allow exponent to exceed 8, so we have at most 256 loops, | ||
52 | * or 2,048 (to 4,095) cycles, as our maximum. | ||
53 | */ | ||
54 | if (exponent > 8) | ||
55 | exponent = 8; | ||
56 | |||
57 | loops = 1 << exponent; | ||
58 | |||
59 | /* Add a randomness factor so two cpus never get in lock step. */ | ||
60 | loops += __insn_crc32_32(stack_pointer, get_cycles_low()) & | ||
61 | (loops - 1); | ||
62 | |||
63 | relax(loops); | ||
64 | } | ||
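Putting the pieces above together, each retry roughly doubles the wait, with the base loop count capped at 2^8 and a pseudo-random extra of up to one additional base interval so competing cpus fall out of lock step. A sketch of the resulting upper bound per call, assuming 8 cycles per relax loop as on tilepro and that the randomized loop count is what gets spun:

/* Rough upper bound on cycles one call to delay_backoff(iterations)
 * can spend. */
static unsigned backoff_cycles_max(int iterations)
{
        unsigned exponent = iterations + 1;
        unsigned loops;

        if (exponent > 8)
                exponent = 8;
        loops = 1u << exponent;         /* base loop count */
        loops += loops - 1;             /* worst-case randomness */
        return loops * 8;               /* cycles per relax loop on tilepro */
}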
diff --git a/arch/tile/lib/strchr_32.c b/arch/tile/lib/strchr_32.c new file mode 100644 index 000000000000..c94e6f7ae7b5 --- /dev/null +++ b/arch/tile/lib/strchr_32.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
19 | #undef strchr | ||
20 | |||
21 | char *strchr(const char *s, int c) | ||
22 | { | ||
23 | int z, g; | ||
24 | |||
25 | /* Get an aligned pointer. */ | ||
26 | const uintptr_t s_int = (uintptr_t) s; | ||
27 | const uint32_t *p = (const uint32_t *)(s_int & -4); | ||
28 | |||
29 | /* Create four copies of the byte for which we are looking. */ | ||
30 | const uint32_t goal = 0x01010101 * (uint8_t) c; | ||
31 | |||
32 | /* Read the first aligned word, but force bytes before the string to | ||
33 | * match neither zero nor goal (we make sure the high bit of each | ||
34 | * byte is 1, and the low 7 bits are all the opposite of the goal | ||
35 | * byte). | ||
36 | * | ||
37 | * Note that this shift count expression works because we know shift | ||
38 | * counts are taken mod 32. | ||
39 | */ | ||
40 | const uint32_t before_mask = (1 << (s_int << 3)) - 1; | ||
41 | uint32_t v = (*p | before_mask) ^ (goal & __insn_shrib(before_mask, 1)); | ||
42 | |||
43 | uint32_t zero_matches, goal_matches; | ||
44 | while (1) { | ||
45 | /* Look for a terminating '\0'. */ | ||
46 | zero_matches = __insn_seqb(v, 0); | ||
47 | |||
48 | /* Look for the goal byte. */ | ||
49 | goal_matches = __insn_seqb(v, goal); | ||
50 | |||
51 | if (__builtin_expect(zero_matches | goal_matches, 0)) | ||
52 | break; | ||
53 | |||
54 | v = *++p; | ||
55 | } | ||
56 | |||
57 | z = __insn_ctz(zero_matches); | ||
58 | g = __insn_ctz(goal_matches); | ||
59 | |||
60 | /* If we found c before '\0' we got a match. Note that if c == '\0' | ||
61 | * then g == z, and we correctly return the address of the '\0' | ||
62 | * rather than NULL. | ||
63 | */ | ||
64 | return (g <= z) ? ((char *)p) + (g >> 3) : NULL; | ||
65 | } | ||
66 | EXPORT_SYMBOL(strchr); | ||
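The before_mask line above is what lets the first, possibly unaligned, word be scanned safely: the bytes that precede the string are forced to look like neither '\0' nor the goal byte. Below is a host-side illustration of that step, with a plain shift-and-mask standing in for the byte-wise __insn_shrib (little-endian byte order assumed, as on tile; illustrative only).

#include <stdint.h>

/* For a string starting k bytes (k in 0..3) into an aligned word, make
 * the k leading bytes match neither zero nor the goal byte c. */
static uint32_t neutralize_leading_bytes(uint32_t word, unsigned k, uint8_t c)
{
        uint32_t goal = 0x01010101u * c;
        uint32_t before_mask = (1u << (k * 8)) - 1;

        /* OR sets every bit of the leading bytes (so they are nonzero);
         * XOR then flips their low seven bits wherever the goal has a
         * bit set, so they cannot equal the goal byte either. */
        return (word | before_mask) ^
               (goal & ((before_mask >> 1) & 0x7f7f7f7fu));
}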
diff --git a/arch/tile/lib/strlen_32.c b/arch/tile/lib/strlen_32.c new file mode 100644 index 000000000000..f26f88e11e4a --- /dev/null +++ b/arch/tile/lib/strlen_32.c | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/module.h> | ||
18 | |||
19 | size_t strlen(const char *s) | ||
20 | { | ||
21 | /* Get an aligned pointer. */ | ||
22 | const uintptr_t s_int = (uintptr_t) s; | ||
23 | const uint32_t *p = (const uint32_t *)(s_int & -4); | ||
24 | |||
25 | /* Read the first word, but force bytes before the string to be nonzero. | ||
26 | * This expression works because we know shift counts are taken mod 32. | ||
27 | */ | ||
28 | uint32_t v = *p | ((1 << (s_int << 3)) - 1); | ||
29 | |||
30 | uint32_t bits; | ||
31 | while ((bits = __insn_seqb(v, 0)) == 0) | ||
32 | v = *++p; | ||
33 | |||
34 | return ((const char *)p) + (__insn_ctz(bits) >> 3) - s; | ||
35 | } | ||
36 | EXPORT_SYMBOL(strlen); | ||
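__insn_seqb compares its operands byte by byte, so seqb(v, 0) marks the zero byte lanes; __insn_ctz then finds the lowest marked lane and the >> 3 turns that bit position into a byte offset. A rough host-side stand-in for the lane test is sketched below (the exact nonzero per-lane value does not matter to ctz; illustrative only).

#include <stdint.h>

/* Set a bit in each byte lane of v that holds zero. */
static uint32_t zero_byte_lanes(uint32_t v)
{
        uint32_t lanes = 0;
        int i;

        for (i = 0; i < 4; i++)
                if (((v >> (i * 8)) & 0xff) == 0)
                        lanes |= 1u << (i * 8);
        return lanes;
}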
diff --git a/arch/tile/lib/uaccess.c b/arch/tile/lib/uaccess.c new file mode 100644 index 000000000000..9ae182568b77 --- /dev/null +++ b/arch/tile/lib/uaccess.c | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/uaccess.h> | ||
16 | #include <linux/module.h> | ||
17 | |||
18 | int __range_ok(unsigned long addr, unsigned long size) | ||
19 | { | ||
20 | unsigned long limit = current_thread_info()->addr_limit.seg; | ||
21 | __chk_user_ptr(addr); | ||
22 | return !((addr < limit && size <= limit - addr) || | ||
23 | is_arch_mappable_range(addr, size)); | ||
24 | } | ||
25 | EXPORT_SYMBOL(__range_ok); | ||
26 | |||
27 | void copy_from_user_overflow(void) | ||
28 | { | ||
29 | WARN(1, "Buffer overflow detected!\n"); | ||
30 | } | ||
31 | EXPORT_SYMBOL(copy_from_user_overflow); | ||
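__range_ok() returns zero when the range is acceptable; the acceptance test is written as "addr < limit && size <= limit - addr" rather than "addr + size <= limit" so that a huge size cannot wrap around and slip past the check. A small illustration of the difference, using hypothetical 32-bit values as on tilepro:

#include <stdbool.h>

/* With limit = 0xC0000000, addr = 0xB0000000 and size = 0x60000000 in
 * 32-bit arithmetic, the naive addr + size wraps to 0x10000000 and would
 * wrongly pass "addr + size <= limit"; the form below rejects it because
 * limit - addr = 0x10000000 is smaller than size. */
static bool range_fits(unsigned long addr, unsigned long size,
                       unsigned long limit)
{
        return addr < limit && size <= limit - addr;
}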
diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S new file mode 100644 index 000000000000..979f76d83746 --- /dev/null +++ b/arch/tile/lib/usercopy_32.S | |||
@@ -0,0 +1,223 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <asm/errno.h> | ||
17 | #include <asm/cache.h> | ||
18 | #include <arch/chip.h> | ||
19 | |||
20 | /* Access user memory, but use MMU to avoid propagating kernel exceptions. */ | ||
21 | |||
22 | .pushsection .fixup,"ax" | ||
23 | |||
24 | get_user_fault: | ||
25 | { move r0, zero; move r1, zero } | ||
26 | { movei r2, -EFAULT; jrp lr } | ||
27 | ENDPROC(get_user_fault) | ||
28 | |||
29 | put_user_fault: | ||
30 | { movei r0, -EFAULT; jrp lr } | ||
31 | ENDPROC(put_user_fault) | ||
32 | |||
33 | .popsection | ||
34 | |||
35 | /* | ||
36 | * __get_user_N functions take a pointer in r0, and return 0 in r2 | ||
37 | * on success, with the value in r0; or else -EFAULT in r2. | ||
38 | */ | ||
39 | #define __get_user_N(bytes, LOAD) \ | ||
40 | STD_ENTRY(__get_user_##bytes); \ | ||
41 | 1: { LOAD r0, r0; move r1, zero; move r2, zero }; \ | ||
42 | jrp lr; \ | ||
43 | STD_ENDPROC(__get_user_##bytes); \ | ||
44 | .pushsection __ex_table,"a"; \ | ||
45 | .word 1b, get_user_fault; \ | ||
46 | .popsection | ||
47 | |||
48 | __get_user_N(1, lb_u) | ||
49 | __get_user_N(2, lh_u) | ||
50 | __get_user_N(4, lw) | ||
51 | |||
52 | /* | ||
53 | * __get_user_8 takes a pointer in r0, and returns 0 in r2 | ||
54 | * on success, with the value in r0/r1; or else -EFAULT in r2. | ||
55 | */ | ||
56 | STD_ENTRY(__get_user_8); | ||
57 | 1: { lw r0, r0; addi r1, r0, 4 }; | ||
58 | 2: { lw r1, r1; move r2, zero }; | ||
59 | jrp lr; | ||
60 | STD_ENDPROC(__get_user_8); | ||
61 | .pushsection __ex_table,"a"; | ||
62 | .word 1b, get_user_fault; | ||
63 | .word 2b, get_user_fault; | ||
64 | .popsection | ||
65 | |||
66 | /* | ||
67 | * __put_user_N functions take a value in r0 and a pointer in r1, | ||
68 | * and return 0 in r0 on success or -EFAULT on failure. | ||
69 | */ | ||
70 | #define __put_user_N(bytes, STORE) \ | ||
71 | STD_ENTRY(__put_user_##bytes); \ | ||
72 | 1: { STORE r1, r0; move r0, zero }; \ | ||
73 | jrp lr; \ | ||
74 | STD_ENDPROC(__put_user_##bytes); \ | ||
75 | .pushsection __ex_table,"a"; \ | ||
76 | .word 1b, put_user_fault; \ | ||
77 | .popsection | ||
78 | |||
79 | __put_user_N(1, sb) | ||
80 | __put_user_N(2, sh) | ||
81 | __put_user_N(4, sw) | ||
82 | |||
83 | /* | ||
84 | * __put_user_8 takes a value in r0/r1 and a pointer in r2, | ||
85 | * and returns 0 in r0 on success or -EFAULT on failure. | ||
86 | */ | ||
87 | STD_ENTRY(__put_user_8) | ||
88 | 1: { sw r2, r0; addi r2, r2, 4 } | ||
89 | 2: { sw r2, r1; move r0, zero } | ||
90 | jrp lr | ||
91 | STD_ENDPROC(__put_user_8) | ||
92 | .pushsection __ex_table,"a" | ||
93 | .word 1b, put_user_fault | ||
94 | .word 2b, put_user_fault | ||
95 | .popsection | ||
96 | |||
97 | |||
98 | /* | ||
99 | * strnlen_user_asm takes the pointer in r0, and the length bound in r1. | ||
100 | * It returns the length, including the terminating NUL, or zero on exception. | ||
101 | * If length is greater than the bound, returns one plus the bound. | ||
102 | */ | ||
103 | STD_ENTRY(strnlen_user_asm) | ||
104 | { bz r1, 2f; addi r3, r0, -1 } /* bias down to include NUL */ | ||
105 | 1: { lb_u r4, r0; addi r1, r1, -1 } | ||
106 | bz r4, 2f | ||
107 | { bnzt r1, 1b; addi r0, r0, 1 } | ||
108 | 2: { sub r0, r0, r3; jrp lr } | ||
109 | STD_ENDPROC(strnlen_user_asm) | ||
110 | .pushsection .fixup,"ax" | ||
111 | strnlen_user_fault: | ||
112 | { move r0, zero; jrp lr } | ||
113 | ENDPROC(strnlen_user_fault) | ||
114 | .section __ex_table,"a" | ||
115 | .word 1b, strnlen_user_fault | ||
116 | .popsection | ||
117 | |||
118 | /* | ||
119 | * strncpy_from_user_asm takes the kernel target pointer in r0, | ||
120 | * the userspace source pointer in r1, and the length bound (including | ||
121 | * the trailing NUL) in r2. On success, it returns the string length | ||
122 | * (not including the trailing NUL), or -EFAULT on failure. | ||
123 | */ | ||
124 | STD_ENTRY(strncpy_from_user_asm) | ||
125 | { bz r2, 2f; move r3, r0 } | ||
126 | 1: { lb_u r4, r1; addi r1, r1, 1; addi r2, r2, -1 } | ||
127 | { sb r0, r4; addi r0, r0, 1 } | ||
128 | bz r2, 2f | ||
129 | bnzt r4, 1b | ||
130 | addi r0, r0, -1 /* don't count the trailing NUL */ | ||
131 | 2: { sub r0, r0, r3; jrp lr } | ||
132 | STD_ENDPROC(strncpy_from_user_asm) | ||
133 | .pushsection .fixup,"ax" | ||
134 | strncpy_from_user_fault: | ||
135 | { movei r0, -EFAULT; jrp lr } | ||
136 | ENDPROC(strncpy_from_user_fault) | ||
137 | .section __ex_table,"a" | ||
138 | .word 1b, strncpy_from_user_fault | ||
139 | .popsection | ||
140 | |||
141 | /* | ||
142 | * clear_user_asm takes the user target address in r0 and the | ||
143 | * number of bytes to zero in r1. | ||
144 | * It returns the number of uncopiable bytes (hopefully zero) in r0. | ||
145 | * Note that we don't use a separate .fixup section here since we fall | ||
146 | * through into the "fixup" code as the last straight-line bundle anyway. | ||
147 | */ | ||
148 | STD_ENTRY(clear_user_asm) | ||
149 | { bz r1, 2f; or r2, r0, r1 } | ||
150 | andi r2, r2, 3 | ||
151 | bzt r2, .Lclear_aligned_user_asm | ||
152 | 1: { sb r0, zero; addi r0, r0, 1; addi r1, r1, -1 } | ||
153 | bnzt r1, 1b | ||
154 | 2: { move r0, r1; jrp lr } | ||
155 | .pushsection __ex_table,"a" | ||
156 | .word 1b, 2b | ||
157 | .popsection | ||
158 | |||
159 | .Lclear_aligned_user_asm: | ||
160 | 1: { sw r0, zero; addi r0, r0, 4; addi r1, r1, -4 } | ||
161 | bnzt r1, 1b | ||
162 | 2: { move r0, r1; jrp lr } | ||
163 | STD_ENDPROC(clear_user_asm) | ||
164 | .pushsection __ex_table,"a" | ||
165 | .word 1b, 2b | ||
166 | .popsection | ||
167 | |||
168 | /* | ||
169 | * flush_user_asm takes the user target address in r0 and the | ||
170 | * number of bytes to flush in r1. | ||
171 | * It returns the number of unflushable bytes (hopefully zero) in r0. | ||
172 | */ | ||
173 | STD_ENTRY(flush_user_asm) | ||
174 | bz r1, 2f | ||
175 | { movei r2, L2_CACHE_BYTES; add r1, r0, r1 } | ||
176 | { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 } | ||
177 | { and r0, r0, r2; and r1, r1, r2 } | ||
178 | { sub r1, r1, r0 } | ||
179 | 1: { flush r0; addi r1, r1, -CHIP_FLUSH_STRIDE() } | ||
180 | { addi r0, r0, CHIP_FLUSH_STRIDE(); bnzt r1, 1b } | ||
181 | 2: { move r0, r1; jrp lr } | ||
182 | STD_ENDPROC(flush_user_asm) | ||
183 | .pushsection __ex_table,"a" | ||
184 | .word 1b, 2b | ||
185 | .popsection | ||
186 | |||
187 | /* | ||
188 | * inv_user_asm takes the user target address in r0 and the | ||
189 | * number of bytes to invalidate in r1. | ||
190 | * It returns the number of not inv'able bytes (hopefully zero) in r0. | ||
191 | */ | ||
192 | STD_ENTRY(inv_user_asm) | ||
193 | bz r1, 2f | ||
194 | { movei r2, L2_CACHE_BYTES; add r1, r0, r1 } | ||
195 | { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 } | ||
196 | { and r0, r0, r2; and r1, r1, r2 } | ||
197 | { sub r1, r1, r0 } | ||
198 | 1: { inv r0; addi r1, r1, -CHIP_INV_STRIDE() } | ||
199 | { addi r0, r0, CHIP_INV_STRIDE(); bnzt r1, 1b } | ||
200 | 2: { move r0, r1; jrp lr } | ||
201 | STD_ENDPROC(inv_user_asm) | ||
202 | .pushsection __ex_table,"a" | ||
203 | .word 1b, 2b | ||
204 | .popsection | ||
205 | |||
206 | /* | ||
207 | * finv_user_asm takes the user target address in r0 and the | ||
208 | * number of bytes to flush-invalidate in r1. | ||
209 | * It returns the number of not finv'able bytes (hopefully zero) in r0. | ||
210 | */ | ||
211 | STD_ENTRY(finv_user_asm) | ||
212 | bz r1, 2f | ||
213 | { movei r2, L2_CACHE_BYTES; add r1, r0, r1 } | ||
214 | { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 } | ||
215 | { and r0, r0, r2; and r1, r1, r2 } | ||
216 | { sub r1, r1, r0 } | ||
217 | 1: { finv r0; addi r1, r1, -CHIP_FINV_STRIDE() } | ||
218 | { addi r0, r0, CHIP_FINV_STRIDE(); bnzt r1, 1b } | ||
219 | 2: { move r0, r1; jrp lr } | ||
220 | STD_ENDPROC(finv_user_asm) | ||
221 | .pushsection __ex_table,"a" | ||
222 | .word 1b, 2b | ||
223 | .popsection | ||
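Restating the strnlen_user_asm return convention in C for clarity: the count includes the terminating NUL, a string longer than the bound reports bound + 1, and a faulting access reports 0 through the fixup above. The sketch below ignores the faulting case and is illustrative only.

/* What strnlen_user_asm computes for a readable user string. */
static unsigned long sketch_strnlen_user(const char *str, unsigned long bound)
{
        unsigned long i;

        for (i = 0; i < bound; i++)
                if (str[i] == '\0')
                        return i + 1;   /* length including the NUL */
        return bound + 1;               /* no NUL within the bound */
}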
diff --git a/arch/tile/mm/Makefile b/arch/tile/mm/Makefile new file mode 100644 index 000000000000..e252aeddc17d --- /dev/null +++ b/arch/tile/mm/Makefile | |||
@@ -0,0 +1,9 @@ | |||
1 | # | ||
2 | # Makefile for the linux tile-specific parts of the memory manager. | ||
3 | # | ||
4 | |||
5 | obj-y := init.o pgtable.o fault.o extable.o elf.o \ | ||
6 | mmap.o homecache.o migrate_$(BITS).o | ||
7 | |||
8 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | ||
9 | obj-$(CONFIG_HIGHMEM) += highmem.o | ||
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c new file mode 100644 index 000000000000..818c9bef060c --- /dev/null +++ b/arch/tile/mm/elf.c | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <linux/pagemap.h> | ||
17 | #include <linux/binfmts.h> | ||
18 | #include <linux/compat.h> | ||
19 | #include <linux/mman.h> | ||
20 | #include <linux/elf.h> | ||
21 | #include <asm/pgtable.h> | ||
22 | #include <asm/pgalloc.h> | ||
23 | |||
24 | /* Notify a running simulator, if any, that an exec just occurred. */ | ||
25 | static void sim_notify_exec(const char *binary_name) | ||
26 | { | ||
27 | unsigned char c; | ||
28 | do { | ||
29 | c = *binary_name++; | ||
30 | __insn_mtspr(SPR_SIM_CONTROL, | ||
31 | (SIM_CONTROL_OS_EXEC | ||
32 | | (c << _SIM_CONTROL_OPERATOR_BITS))); | ||
33 | |||
34 | } while (c); | ||
35 | } | ||
36 | |||
37 | static int notify_exec(void) | ||
38 | { | ||
39 | int retval = 0; /* failure */ | ||
40 | struct vm_area_struct *vma = current->mm->mmap; | ||
41 | while (vma) { | ||
42 | if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) | ||
43 | break; | ||
44 | vma = vma->vm_next; | ||
45 | } | ||
46 | if (vma) { | ||
47 | char *buf = (char *) __get_free_page(GFP_KERNEL); | ||
48 | if (buf) { | ||
49 | char *path = d_path(&vma->vm_file->f_path, | ||
50 | buf, PAGE_SIZE); | ||
51 | if (!IS_ERR(path)) { | ||
52 | sim_notify_exec(path); | ||
53 | retval = 1; | ||
54 | } | ||
55 | free_page((unsigned long)buf); | ||
56 | } | ||
57 | } | ||
58 | return retval; | ||
59 | } | ||
60 | |||
61 | /* Notify a running simulator, if any, that we loaded an interpreter. */ | ||
62 | static void sim_notify_interp(unsigned long load_addr) | ||
63 | { | ||
64 | size_t i; | ||
65 | for (i = 0; i < sizeof(load_addr); i++) { | ||
66 | unsigned char c = load_addr >> (i * 8); | ||
67 | __insn_mtspr(SPR_SIM_CONTROL, | ||
68 | (SIM_CONTROL_OS_INTERP | ||
69 | | (c << _SIM_CONTROL_OPERATOR_BITS))); | ||
70 | } | ||
71 | } | ||
72 | |||
73 | |||
74 | /* Kernel address of page used to map read-only kernel data into userspace. */ | ||
75 | static void *vdso_page; | ||
76 | |||
77 | /* One-entry array used for install_special_mapping. */ | ||
78 | static struct page *vdso_pages[1]; | ||
79 | |||
80 | int __init vdso_setup(void) | ||
81 | { | ||
82 | extern char __rt_sigreturn[], __rt_sigreturn_end[]; | ||
83 | vdso_page = (void *)get_zeroed_page(GFP_ATOMIC); | ||
84 | memcpy(vdso_page, __rt_sigreturn, __rt_sigreturn_end - __rt_sigreturn); | ||
85 | vdso_pages[0] = virt_to_page(vdso_page); | ||
86 | return 0; | ||
87 | } | ||
88 | device_initcall(vdso_setup); | ||
89 | |||
90 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
91 | { | ||
92 | if (vma->vm_private_data == vdso_pages) | ||
93 | return "[vdso]"; | ||
94 | #ifndef __tilegx__ | ||
95 | if (vma->vm_start == MEM_USER_INTRPT) | ||
96 | return "[intrpt]"; | ||
97 | #endif | ||
98 | return NULL; | ||
99 | } | ||
100 | |||
101 | int arch_setup_additional_pages(struct linux_binprm *bprm, | ||
102 | int executable_stack) | ||
103 | { | ||
104 | struct mm_struct *mm = current->mm; | ||
105 | unsigned long vdso_base; | ||
106 | int retval = 0; | ||
107 | |||
108 | /* | ||
109 | * Notify the simulator that an exec just occurred. | ||
110 | * If we can't find the filename of the mapping, just use | ||
111 | * whatever was passed as the linux_binprm filename. | ||
112 | */ | ||
113 | if (!notify_exec()) | ||
114 | sim_notify_exec(bprm->filename); | ||
115 | |||
116 | down_write(&mm->mmap_sem); | ||
117 | |||
118 | /* | ||
119 | * MAYWRITE to allow gdb to COW and set breakpoints | ||
120 | * | ||
121 | * Make sure the vDSO gets into every core dump. Dumping its | ||
122 | * contents makes post-mortem fully interpretable later | ||
123 | * without matching up the same kernel and hardware config to | ||
124 | * see what PC values meant. | ||
125 | */ | ||
126 | vdso_base = VDSO_BASE; | ||
127 | retval = install_special_mapping(mm, vdso_base, PAGE_SIZE, | ||
128 | VM_READ|VM_EXEC| | ||
129 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| | ||
130 | VM_ALWAYSDUMP, | ||
131 | vdso_pages); | ||
132 | |||
133 | #ifndef __tilegx__ | ||
134 | /* | ||
135 | * Set up a user-interrupt mapping here; the user can't | ||
136 | * create one themselves since it is above TASK_SIZE. | ||
137 | * We make it unwritable by default, so the model for adding | ||
138 | * interrupt vectors always involves an mprotect. | ||
139 | */ | ||
140 | if (!retval) { | ||
141 | unsigned long addr = MEM_USER_INTRPT; | ||
142 | addr = mmap_region(NULL, addr, INTRPT_SIZE, | ||
143 | MAP_FIXED|MAP_ANONYMOUS|MAP_PRIVATE, | ||
144 | VM_READ|VM_EXEC| | ||
145 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, 0); | ||
146 | if (addr > (unsigned long) -PAGE_SIZE) | ||
147 | retval = (int) addr; | ||
148 | } | ||
149 | #endif | ||
150 | |||
151 | up_write(&mm->mmap_sem); | ||
152 | |||
153 | return retval; | ||
154 | } | ||
155 | |||
156 | |||
157 | void elf_plat_init(struct pt_regs *regs, unsigned long load_addr) | ||
158 | { | ||
159 | /* Zero all registers. */ | ||
160 | memset(regs, 0, sizeof(*regs)); | ||
161 | |||
162 | /* Report the interpreter's load address. */ | ||
163 | sim_notify_interp(load_addr); | ||
164 | } | ||
diff --git a/arch/tile/mm/extable.c b/arch/tile/mm/extable.c new file mode 100644 index 000000000000..4fb0acb9d154 --- /dev/null +++ b/arch/tile/mm/extable.c | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | |||
19 | int fixup_exception(struct pt_regs *regs) | ||
20 | { | ||
21 | const struct exception_table_entry *fixup; | ||
22 | |||
23 | fixup = search_exception_tables(regs->pc); | ||
24 | if (fixup) { | ||
25 | regs->pc = fixup->fixup; | ||
26 | return 1; | ||
27 | } | ||
28 | |||
29 | return 0; | ||
30 | } | ||
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c new file mode 100644 index 000000000000..9b6b92f07def --- /dev/null +++ b/arch/tile/mm/fault.c | |||
@@ -0,0 +1,905 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * From i386 code copyright (C) 1995 Linus Torvalds | ||
15 | */ | ||
16 | |||
17 | #include <linux/signal.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/ptrace.h> | ||
24 | #include <linux/mman.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/smp.h> | ||
27 | #include <linux/smp_lock.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/tty.h> | ||
31 | #include <linux/vt_kern.h> /* For unblank_screen() */ | ||
32 | #include <linux/highmem.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/kprobes.h> | ||
35 | #include <linux/hugetlb.h> | ||
36 | #include <linux/syscalls.h> | ||
37 | #include <linux/uaccess.h> | ||
38 | |||
39 | #include <asm/system.h> | ||
40 | #include <asm/pgalloc.h> | ||
41 | #include <asm/sections.h> | ||
42 | |||
43 | #include <arch/interrupts.h> | ||
44 | |||
45 | /* | ||
46 | * Unlock any spinlocks which will prevent us from getting the | ||
47 | * message out | ||
48 | */ | ||
49 | void bust_spinlocks(int yes) | ||
50 | { | ||
51 | int loglevel_save = console_loglevel; | ||
52 | |||
53 | if (yes) { | ||
54 | oops_in_progress = 1; | ||
55 | return; | ||
56 | } | ||
57 | oops_in_progress = 0; | ||
58 | /* | ||
59 | * OK, the message is on the console. Now we call printk() | ||
60 | * without oops_in_progress set so that printk will give klogd | ||
61 | * a poke. Hold onto your hats... | ||
62 | */ | ||
63 | console_loglevel = 15; /* NMI oopser may have shut the console up */ | ||
64 | printk(" "); | ||
65 | console_loglevel = loglevel_save; | ||
66 | } | ||
67 | |||
68 | static noinline void force_sig_info_fault(int si_signo, int si_code, | ||
69 | unsigned long address, int fault_num, struct task_struct *tsk) | ||
70 | { | ||
71 | siginfo_t info; | ||
72 | |||
73 | if (unlikely(tsk->pid < 2)) { | ||
74 | panic("Signal %d (code %d) at %#lx sent to %s!", | ||
75 | si_signo, si_code & 0xffff, address, | ||
76 | tsk->pid ? "init" : "the idle task"); | ||
77 | } | ||
78 | |||
79 | info.si_signo = si_signo; | ||
80 | info.si_errno = 0; | ||
81 | info.si_code = si_code; | ||
82 | info.si_addr = (void __user *)address; | ||
83 | info.si_trapno = fault_num; | ||
84 | force_sig_info(si_signo, &info, tsk); | ||
85 | } | ||
86 | |||
87 | #ifndef __tilegx__ | ||
88 | /* | ||
89 | * Synthesize the fault a PL0 process would get by doing a word-load of | ||
90 | * an unaligned address or a high kernel address. Called indirectly | ||
91 | * from sys_cmpxchg() in kernel/intvec.S. | ||
92 | */ | ||
93 | int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs) | ||
94 | { | ||
95 | if (address >= PAGE_OFFSET) | ||
96 | force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address, | ||
97 | INT_DTLB_MISS, current); | ||
98 | else | ||
99 | force_sig_info_fault(SIGBUS, BUS_ADRALN, address, | ||
100 | INT_UNALIGN_DATA, current); | ||
101 | |||
102 | /* | ||
103 | * Adjust pc to point at the actual instruction, which is unusual | ||
104 | * for syscalls normally, but is appropriate when we are claiming | ||
105 | * that a syscall swint1 caused a page fault or bus error. | ||
106 | */ | ||
107 | regs->pc -= 8; | ||
108 | |||
109 | /* | ||
110 | * Mark this as a caller-save interrupt, like a normal page fault, | ||
111 | * so that when we go through the signal handler path we will | ||
112 | * properly restore r0, r1, and r2 for the signal handler arguments. | ||
113 | */ | ||
114 | regs->flags |= PT_FLAGS_CALLER_SAVES; | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | #endif | ||
119 | |||
120 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | ||
121 | { | ||
122 | unsigned index = pgd_index(address); | ||
123 | pgd_t *pgd_k; | ||
124 | pud_t *pud, *pud_k; | ||
125 | pmd_t *pmd, *pmd_k; | ||
126 | |||
127 | pgd += index; | ||
128 | pgd_k = init_mm.pgd + index; | ||
129 | |||
130 | if (!pgd_present(*pgd_k)) | ||
131 | return NULL; | ||
132 | |||
133 | pud = pud_offset(pgd, address); | ||
134 | pud_k = pud_offset(pgd_k, address); | ||
135 | if (!pud_present(*pud_k)) | ||
136 | return NULL; | ||
137 | |||
138 | pmd = pmd_offset(pud, address); | ||
139 | pmd_k = pmd_offset(pud_k, address); | ||
140 | if (!pmd_present(*pmd_k)) | ||
141 | return NULL; | ||
142 | if (!pmd_present(*pmd)) { | ||
143 | set_pmd(pmd, *pmd_k); | ||
144 | arch_flush_lazy_mmu_mode(); | ||
145 | } else | ||
146 | BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k)); | ||
147 | return pmd_k; | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * Handle a fault on the vmalloc or module mapping area | ||
152 | */ | ||
153 | static inline int vmalloc_fault(pgd_t *pgd, unsigned long address) | ||
154 | { | ||
155 | pmd_t *pmd_k; | ||
156 | pte_t *pte_k; | ||
157 | |||
158 | /* Make sure we are in vmalloc area */ | ||
159 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | ||
160 | return -1; | ||
161 | |||
162 | /* | ||
163 | * Synchronize this task's top level page-table | ||
164 | * with the 'reference' page table. | ||
165 | */ | ||
166 | pmd_k = vmalloc_sync_one(pgd, address); | ||
167 | if (!pmd_k) | ||
168 | return -1; | ||
169 | if (pmd_huge(*pmd_k)) | ||
170 | return 0; /* support TILE huge_vmap() API */ | ||
171 | pte_k = pte_offset_kernel(pmd_k, address); | ||
172 | if (!pte_present(*pte_k)) | ||
173 | return -1; | ||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | /* Wait until this PTE has completed migration. */ | ||
178 | static void wait_for_migration(pte_t *pte) | ||
179 | { | ||
180 | if (pte_migrating(*pte)) { | ||
181 | /* | ||
182 | * Wait until the migrater fixes up this pte. | ||
183 | * We scale the loop count by the clock rate so we'll wait for | ||
184 | * a few seconds here. | ||
185 | */ | ||
186 | int retries = 0; | ||
187 | int bound = get_clock_rate(); | ||
188 | while (pte_migrating(*pte)) { | ||
189 | barrier(); | ||
190 | if (++retries > bound) | ||
191 | panic("Hit migrating PTE (%#llx) and" | ||
192 | " page PFN %#lx still migrating", | ||
193 | pte->val, pte_pfn(*pte)); | ||
194 | } | ||
195 | } | ||
196 | } | ||
197 | |||
198 | /* | ||
199 | * It's not generally safe to use "current" to get the page table pointer, | ||
200 | * since we might be running an oprofile interrupt in the middle of a | ||
201 | * task switch. | ||
202 | */ | ||
203 | static pgd_t *get_current_pgd(void) | ||
204 | { | ||
205 | HV_Context ctx = hv_inquire_context(); | ||
206 | unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT; | ||
207 | struct page *pgd_page = pfn_to_page(pgd_pfn); | ||
208 | BUG_ON(PageHighMem(pgd_page)); /* oops, HIGHPTE? */ | ||
209 | return (pgd_t *) __va(ctx.page_table); | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * We can receive a page fault from a migrating PTE at any time. | ||
214 | * Handle it by just waiting until the fault resolves. | ||
215 | * | ||
216 | * It's also possible to get a migrating kernel PTE that resolves | ||
217 | * itself during the downcall from hypervisor to Linux. We just check | ||
218 | * here to see if the PTE seems valid, and if so we retry it. | ||
219 | * | ||
220 | * NOTE! We MUST NOT take any locks for this case. We may be in an | ||
221 | * interrupt or a critical region, and must do as little as possible. | ||
222 | * Similarly, we can't use atomic ops here, since we may be handling a | ||
223 | * fault caused by an atomic op access. | ||
224 | */ | ||
225 | static int handle_migrating_pte(pgd_t *pgd, int fault_num, | ||
226 | unsigned long address, | ||
227 | int is_kernel_mode, int write) | ||
228 | { | ||
229 | pud_t *pud; | ||
230 | pmd_t *pmd; | ||
231 | pte_t *pte; | ||
232 | pte_t pteval; | ||
233 | |||
234 | if (pgd_addr_invalid(address)) | ||
235 | return 0; | ||
236 | |||
237 | pgd += pgd_index(address); | ||
238 | pud = pud_offset(pgd, address); | ||
239 | if (!pud || !pud_present(*pud)) | ||
240 | return 0; | ||
241 | pmd = pmd_offset(pud, address); | ||
242 | if (!pmd || !pmd_present(*pmd)) | ||
243 | return 0; | ||
244 | pte = pmd_huge_page(*pmd) ? ((pte_t *)pmd) : | ||
245 | pte_offset_kernel(pmd, address); | ||
246 | pteval = *pte; | ||
247 | if (pte_migrating(pteval)) { | ||
248 | wait_for_migration(pte); | ||
249 | return 1; | ||
250 | } | ||
251 | |||
252 | if (!is_kernel_mode || !pte_present(pteval)) | ||
253 | return 0; | ||
254 | if (fault_num == INT_ITLB_MISS) { | ||
255 | if (pte_exec(pteval)) | ||
256 | return 1; | ||
257 | } else if (write) { | ||
258 | if (pte_write(pteval)) | ||
259 | return 1; | ||
260 | } else { | ||
261 | if (pte_read(pteval)) | ||
262 | return 1; | ||
263 | } | ||
264 | |||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | /* | ||
269 | * This routine is responsible for faulting in user pages. | ||
270 | * It passes the work off to one of the appropriate routines. | ||
271 | * It returns true if the fault was successfully handled. | ||
272 | */ | ||
273 | static int handle_page_fault(struct pt_regs *regs, | ||
274 | int fault_num, | ||
275 | int is_page_fault, | ||
276 | unsigned long address, | ||
277 | int write) | ||
278 | { | ||
279 | struct task_struct *tsk; | ||
280 | struct mm_struct *mm; | ||
281 | struct vm_area_struct *vma; | ||
282 | unsigned long stack_offset; | ||
283 | int fault; | ||
284 | int si_code; | ||
285 | int is_kernel_mode; | ||
286 | pgd_t *pgd; | ||
287 | |||
288 | /* on TILE, protection faults are always writes */ | ||
289 | if (!is_page_fault) | ||
290 | write = 1; | ||
291 | |||
292 | is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL); | ||
293 | |||
294 | tsk = validate_current(); | ||
295 | |||
296 | /* | ||
297 | * Check to see if we might be overwriting the stack, and bail | ||
298 | * out if so. The page fault code is a relatively likely | ||
299 | * place to get trapped in an infinite regress, and once we | ||
300 | * overwrite the whole stack, it becomes very hard to recover. | ||
301 | */ | ||
302 | stack_offset = stack_pointer & (THREAD_SIZE-1); | ||
303 | if (stack_offset < THREAD_SIZE / 8) { | ||
304 | printk(KERN_ALERT "Potential stack overrun: sp %#lx\n", | ||
305 | stack_pointer); | ||
306 | show_regs(regs); | ||
307 | printk(KERN_ALERT "Killing current process %d/%s\n", | ||
308 | tsk->pid, tsk->comm); | ||
309 | do_group_exit(SIGKILL); | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * Early on, we need to check for migrating PTE entries; | ||
314 | * see homecache.c. If we find a migrating PTE, we wait until | ||
315 | * the backing page claims to be done migrating, then we proceed. | ||
316 | * For kernel PTEs, we rewrite the PTE and return and retry. | ||
317 | * Otherwise, we treat the fault like a normal "no PTE" fault, | ||
318 | * rather than trying to patch up the existing PTE. | ||
319 | */ | ||
320 | pgd = get_current_pgd(); | ||
321 | if (handle_migrating_pte(pgd, fault_num, address, | ||
322 | is_kernel_mode, write)) | ||
323 | return 1; | ||
324 | |||
325 | si_code = SEGV_MAPERR; | ||
326 | |||
327 | /* | ||
328 | * We fault-in kernel-space virtual memory on-demand. The | ||
329 | * 'reference' page table is init_mm.pgd. | ||
330 | * | ||
331 | * NOTE! We MUST NOT take any locks for this case. We may | ||
332 | * be in an interrupt or a critical region, and should | ||
333 | * only copy the information from the master page table, | ||
334 | * nothing more. | ||
335 | * | ||
336 | * This verifies that the fault happens in kernel space | ||
337 | * and that the fault was not a protection fault. | ||
338 | */ | ||
339 | if (unlikely(address >= TASK_SIZE && | ||
340 | !is_arch_mappable_range(address, 0))) { | ||
341 | if (is_kernel_mode && is_page_fault && | ||
342 | vmalloc_fault(pgd, address) >= 0) | ||
343 | return 1; | ||
344 | /* | ||
345 | * Don't take the mm semaphore here. If we fixup a prefetch | ||
346 | * fault we could otherwise deadlock. | ||
347 | */ | ||
348 | mm = NULL; /* happy compiler */ | ||
349 | vma = NULL; | ||
350 | goto bad_area_nosemaphore; | ||
351 | } | ||
352 | |||
353 | /* | ||
354 | * If we're trying to touch user-space addresses, we must | ||
355 | * be either at PL0, or else with interrupts enabled in the | ||
356 | * kernel, so either way we can re-enable interrupts here. | ||
357 | */ | ||
358 | local_irq_enable(); | ||
359 | |||
360 | mm = tsk->mm; | ||
361 | |||
362 | /* | ||
363 | * If we're in an interrupt, have no user context or are running in an | ||
364 | * atomic region then we must not take the fault. | ||
365 | */ | ||
366 | if (in_atomic() || !mm) { | ||
367 | vma = NULL; /* happy compiler */ | ||
368 | goto bad_area_nosemaphore; | ||
369 | } | ||
370 | |||
371 | /* | ||
372 | * When running in the kernel we expect faults to occur only to | ||
373 | * addresses in user space. All other faults represent errors in the | ||
374 | * kernel and should generate an OOPS. Unfortunately, in the case of an | ||
375 | * erroneous fault occurring in a code path which already holds mmap_sem | ||
376 | * we will deadlock attempting to validate the fault against the | ||
377 | * address space. Luckily the kernel only validly references user | ||
378 | * space from well defined areas of code, which are listed in the | ||
379 | * exceptions table. | ||
380 | * | ||
381 | * As the vast majority of faults will be valid we will only perform | ||
382 | * the source reference check when there is a possibility of a deadlock. | ||
383 | * Attempt to lock the address space, if we cannot we then validate the | ||
384 | * source. If this is invalid we can skip the address space check, | ||
385 | * thus avoiding the deadlock. | ||
386 | */ | ||
387 | if (!down_read_trylock(&mm->mmap_sem)) { | ||
388 | if (is_kernel_mode && | ||
389 | !search_exception_tables(regs->pc)) { | ||
390 | vma = NULL; /* happy compiler */ | ||
391 | goto bad_area_nosemaphore; | ||
392 | } | ||
393 | down_read(&mm->mmap_sem); | ||
394 | } | ||
395 | |||
396 | vma = find_vma(mm, address); | ||
397 | if (!vma) | ||
398 | goto bad_area; | ||
399 | if (vma->vm_start <= address) | ||
400 | goto good_area; | ||
401 | if (!(vma->vm_flags & VM_GROWSDOWN)) | ||
402 | goto bad_area; | ||
403 | if (regs->sp < PAGE_OFFSET) { | ||
404 | /* | ||
405 | * accessing the stack below sp is always a bug. | ||
406 | */ | ||
407 | if (address < regs->sp) | ||
408 | goto bad_area; | ||
409 | } | ||
410 | if (expand_stack(vma, address)) | ||
411 | goto bad_area; | ||
412 | |||
413 | /* | ||
414 | * Ok, we have a good vm_area for this memory access, so | ||
415 | * we can handle it.. | ||
416 | */ | ||
417 | good_area: | ||
418 | si_code = SEGV_ACCERR; | ||
419 | if (fault_num == INT_ITLB_MISS) { | ||
420 | if (!(vma->vm_flags & VM_EXEC)) | ||
421 | goto bad_area; | ||
422 | } else if (write) { | ||
423 | #ifdef TEST_VERIFY_AREA | ||
424 | if (!is_page_fault && regs->cs == KERNEL_CS) | ||
425 | printk("WP fault at "REGFMT"\n", regs->eip); | ||
426 | #endif | ||
427 | if (!(vma->vm_flags & VM_WRITE)) | ||
428 | goto bad_area; | ||
429 | } else { | ||
430 | if (!is_page_fault || !(vma->vm_flags & VM_READ)) | ||
431 | goto bad_area; | ||
432 | } | ||
433 | |||
434 | survive: | ||
435 | /* | ||
436 | * If for any reason at all we couldn't handle the fault, | ||
437 | * make sure we exit gracefully rather than endlessly redo | ||
438 | * the fault. | ||
439 | */ | ||
440 | fault = handle_mm_fault(mm, vma, address, write); | ||
441 | if (unlikely(fault & VM_FAULT_ERROR)) { | ||
442 | if (fault & VM_FAULT_OOM) | ||
443 | goto out_of_memory; | ||
444 | else if (fault & VM_FAULT_SIGBUS) | ||
445 | goto do_sigbus; | ||
446 | BUG(); | ||
447 | } | ||
448 | if (fault & VM_FAULT_MAJOR) | ||
449 | tsk->maj_flt++; | ||
450 | else | ||
451 | tsk->min_flt++; | ||
452 | |||
453 | /* | ||
454 | * If this was an asynchronous fault, | ||
455 | * restart the appropriate engine. | ||
456 | */ | ||
457 | switch (fault_num) { | ||
458 | #if CHIP_HAS_TILE_DMA() | ||
459 | case INT_DMATLB_MISS: | ||
460 | case INT_DMATLB_MISS_DWNCL: | ||
461 | case INT_DMATLB_ACCESS: | ||
462 | case INT_DMATLB_ACCESS_DWNCL: | ||
463 | __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK); | ||
464 | break; | ||
465 | #endif | ||
466 | #if CHIP_HAS_SN_PROC() | ||
467 | case INT_SNITLB_MISS: | ||
468 | case INT_SNITLB_MISS_DWNCL: | ||
469 | __insn_mtspr(SPR_SNCTL, | ||
470 | __insn_mfspr(SPR_SNCTL) & | ||
471 | ~SPR_SNCTL__FRZPROC_MASK); | ||
472 | break; | ||
473 | #endif | ||
474 | } | ||
475 | |||
476 | up_read(&mm->mmap_sem); | ||
477 | return 1; | ||
478 | |||
479 | /* | ||
480 | * Something tried to access memory that isn't in our memory map.. | ||
481 | * Fix it, but check if it's kernel or user first.. | ||
482 | */ | ||
483 | bad_area: | ||
484 | up_read(&mm->mmap_sem); | ||
485 | |||
486 | bad_area_nosemaphore: | ||
487 | /* User mode accesses just cause a SIGSEGV */ | ||
488 | if (!is_kernel_mode) { | ||
489 | /* | ||
490 | * It's possible to have interrupts off here. | ||
491 | */ | ||
492 | local_irq_enable(); | ||
493 | |||
494 | force_sig_info_fault(SIGSEGV, si_code, address, | ||
495 | fault_num, tsk); | ||
496 | return 0; | ||
497 | } | ||
498 | |||
499 | no_context: | ||
500 | /* Are we prepared to handle this kernel fault? */ | ||
501 | if (fixup_exception(regs)) | ||
502 | return 0; | ||
503 | |||
504 | /* | ||
505 | * Oops. The kernel tried to access some bad page. We'll have to | ||
506 | * terminate things with extreme prejudice. | ||
507 | */ | ||
508 | |||
509 | bust_spinlocks(1); | ||
510 | |||
511 | /* FIXME: no lookup_address() yet */ | ||
512 | #ifdef SUPPORT_LOOKUP_ADDRESS | ||
513 | if (fault_num == INT_ITLB_MISS) { | ||
514 | pte_t *pte = lookup_address(address); | ||
515 | |||
516 | if (pte && pte_present(*pte) && !pte_exec_kernel(*pte)) | ||
517 | printk(KERN_CRIT "kernel tried to execute" | ||
518 | " non-executable page - exploit attempt?" | ||
519 | " (uid: %d)\n", current->uid); | ||
520 | } | ||
521 | #endif | ||
522 | if (address < PAGE_SIZE) | ||
523 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference\n"); | ||
524 | else | ||
525 | printk(KERN_ALERT "Unable to handle kernel paging request\n"); | ||
526 | printk(" at virtual address "REGFMT", pc "REGFMT"\n", | ||
527 | address, regs->pc); | ||
528 | |||
529 | show_regs(regs); | ||
530 | |||
531 | if (unlikely(tsk->pid < 2)) { | ||
532 | panic("Kernel page fault running %s!", | ||
533 | tsk->pid ? "init" : "the idle task"); | ||
534 | } | ||
535 | |||
536 | /* | ||
537 | * More FIXME: we should probably copy the i386 here and | ||
538 | * implement a generic die() routine. Not today. | ||
539 | */ | ||
540 | #ifdef SUPPORT_DIE | ||
541 | die("Oops", regs); | ||
542 | #endif | ||
543 | bust_spinlocks(0); | ||
544 | |||
545 | do_group_exit(SIGKILL); | ||
546 | |||
547 | /* | ||
548 | * We ran out of memory, or some other thing happened to us that made | ||
549 | * us unable to handle the page fault gracefully. | ||
550 | */ | ||
551 | out_of_memory: | ||
552 | up_read(&mm->mmap_sem); | ||
553 | if (is_global_init(tsk)) { | ||
554 | yield(); | ||
555 | down_read(&mm->mmap_sem); | ||
556 | goto survive; | ||
557 | } | ||
558 | printk("VM: killing process %s\n", tsk->comm); | ||
559 | if (!is_kernel_mode) | ||
560 | do_group_exit(SIGKILL); | ||
561 | goto no_context; | ||
562 | |||
563 | do_sigbus: | ||
564 | up_read(&mm->mmap_sem); | ||
565 | |||
566 | /* Kernel mode? Handle exceptions or die */ | ||
567 | if (is_kernel_mode) | ||
568 | goto no_context; | ||
569 | |||
570 | force_sig_info_fault(SIGBUS, BUS_ADRERR, address, fault_num, tsk); | ||
571 | return 0; | ||
572 | } | ||
573 | |||
574 | #ifndef __tilegx__ | ||
575 | |||
576 | extern char sys_cmpxchg[], __sys_cmpxchg_end[]; | ||
577 | extern char __sys_cmpxchg_grab_lock[]; | ||
578 | extern char __start_atomic_asm_code[], __end_atomic_asm_code[]; | ||
579 | |||
580 | /* | ||
581 | * We return this structure in registers to avoid having to write | ||
582 | * additional save/restore code in the intvec.S caller. | ||
583 | */ | ||
584 | struct intvec_state { | ||
585 | void *handler; | ||
586 | unsigned long vecnum; | ||
587 | unsigned long fault_num; | ||
588 | unsigned long info; | ||
589 | unsigned long retval; | ||
590 | }; | ||
591 | |||
592 | /* We must release ICS before panicking or we won't get anywhere. */ | ||
593 | #define ics_panic(fmt, ...) do { \ | ||
594 | __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \ | ||
595 | panic(fmt, __VA_ARGS__); \ | ||
596 | } while (0) | ||
597 | |||
598 | void do_page_fault(struct pt_regs *regs, int fault_num, | ||
599 | unsigned long address, unsigned long write); | ||
600 | |||
601 | /* | ||
602 | * When we take an ITLB or DTLB fault or access violation in the | ||
603 | * supervisor while the critical section bit is set, the hypervisor is | ||
604 | * reluctant to write new values into the EX_CONTEXT_1_x registers, | ||
605 | * since at that point we might not yet have squirreled the SPR | ||
606 | * contents away, and so could not safely take a recursive interrupt. | ||
607 | * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2. | ||
608 | */ | ||
609 | struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num, | ||
610 | unsigned long address, | ||
611 | unsigned long info) | ||
612 | { | ||
613 | unsigned long pc = info & ~1; | ||
614 | int write = info & 1; | ||
615 | pgd_t *pgd = get_current_pgd(); | ||
616 | |||
617 | /* Retval is 1 at first since we will handle the fault fully. */ | ||
618 | struct intvec_state state = { | ||
619 | do_page_fault, fault_num, address, write, 1 | ||
620 | }; | ||
621 | |||
622 | /* Validate that we are plausibly in the right routine. */ | ||
623 | if ((pc & 0x7) != 0 || pc < PAGE_OFFSET || | ||
624 | (fault_num != INT_DTLB_MISS && | ||
625 | fault_num != INT_DTLB_ACCESS)) { | ||
626 | unsigned long old_pc = regs->pc; | ||
627 | regs->pc = pc; | ||
628 | ics_panic("Bad ICS page fault args:" | ||
629 | " old PC %#lx, fault %d/%d at %#lx\n", | ||
630 | old_pc, fault_num, write, address); | ||
631 | } | ||
632 | |||
633 | /* We might be faulting on a vmalloc page, so check that first. */ | ||
634 | if (fault_num != INT_DTLB_ACCESS && vmalloc_fault(pgd, address) >= 0) | ||
635 | return state; | ||
636 | |||
637 | /* | ||
638 | * If we faulted with ICS set in sys_cmpxchg, we are providing | ||
639 | * a user syscall service that should generate a signal on | ||
640 | * fault. We didn't set up a kernel stack on initial entry to | ||
641 | * sys_cmpxchg, but instead had one set up by the fault, which | ||
642 | * (because sys_cmpxchg never releases ICS) came to us via the | ||
643 | * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are | ||
644 | * still referencing the original user code. We release the | ||
645 | * atomic lock and rewrite pt_regs so that it appears that we | ||
646 | * came from user-space directly, and after we finish the | ||
647 | * fault we'll go back to user space and re-issue the swint. | ||
648 | * This way the backtrace information is correct if we need to | ||
649 | * emit a stack dump at any point while handling this. | ||
650 | * | ||
651 | * Must match register use in sys_cmpxchg(). | ||
652 | */ | ||
653 | if (pc >= (unsigned long) sys_cmpxchg && | ||
654 | pc < (unsigned long) __sys_cmpxchg_end) { | ||
655 | #ifdef CONFIG_SMP | ||
656 | /* Don't unlock before we could have locked. */ | ||
657 | if (pc >= (unsigned long)__sys_cmpxchg_grab_lock) { | ||
658 | int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]); | ||
659 | __atomic_fault_unlock(lock_ptr); | ||
660 | } | ||
661 | #endif | ||
662 | regs->sp = regs->regs[27]; | ||
663 | } | ||
664 | |||
665 | /* | ||
666 | * We can also fault in the atomic assembly, in which | ||
667 | * case we use the exception table to do the first-level fixup. | ||
668 | * We may re-fixup again in the real fault handler if it | ||
669 | * turns out the faulting address is just bad, and not, | ||
670 | * for example, migrating. | ||
671 | */ | ||
672 | else if (pc >= (unsigned long) __start_atomic_asm_code && | ||
673 | pc < (unsigned long) __end_atomic_asm_code) { | ||
674 | const struct exception_table_entry *fixup; | ||
675 | #ifdef CONFIG_SMP | ||
676 | /* Unlock the atomic lock. */ | ||
677 | int *lock_ptr = (int *)(regs->regs[ATOMIC_LOCK_REG]); | ||
678 | __atomic_fault_unlock(lock_ptr); | ||
679 | #endif | ||
680 | fixup = search_exception_tables(pc); | ||
681 | if (!fixup) | ||
682 | ics_panic("ICS atomic fault not in table:" | ||
683 | " PC %#lx, fault %d", pc, fault_num); | ||
684 | regs->pc = fixup->fixup; | ||
685 | regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0); | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | * NOTE: the one other type of access that might bring us here | ||
690 | * are the memory ops in __tns_atomic_acquire/__tns_atomic_release, | ||
691 | * but we don't have to check specially for them since we can | ||
692 | * always safely return to the address of the fault and retry, | ||
693 | * since no separate atomic locks are involved. | ||
694 | */ | ||
695 | |||
696 | /* | ||
697 | * Now that we have released the atomic lock (if necessary), | ||
698 | * it's safe to spin if the PTE that caused the fault was migrating. | ||
699 | */ | ||
700 | if (fault_num == INT_DTLB_ACCESS) | ||
701 | write = 1; | ||
702 | if (handle_migrating_pte(pgd, fault_num, address, 1, write)) | ||
703 | return state; | ||
704 | |||
705 | /* Return zero so that we continue on with normal fault handling. */ | ||
706 | state.retval = 0; | ||
707 | return state; | ||
708 | } | ||
709 | |||
710 | #endif /* !__tilegx__ */ | ||
711 | |||
712 | /* | ||
713 | * This routine handles page faults. It determines the address, and the | ||
714 | * problem, and then passes it to handle_page_fault() for normal DTLB and | ||
715 | * ITLB issues, and for DMA or SN processor faults when we are in user | ||
716 | * space. For the latter, if we're in kernel mode, we just save the | ||
717 | * interrupt away appropriately and return immediately. We can't do | ||
718 | * page faults for user code while in kernel mode. | ||
719 | */ | ||
720 | void do_page_fault(struct pt_regs *regs, int fault_num, | ||
721 | unsigned long address, unsigned long write) | ||
722 | { | ||
723 | int is_page_fault; | ||
724 | |||
725 | /* This case should have been handled by do_page_fault_ics(). */ | ||
726 | BUG_ON(write & ~1); | ||
727 | |||
728 | #if CHIP_HAS_TILE_DMA() | ||
729 | /* | ||
730 | * If it's a DMA fault, suspend the transfer while we're | ||
731 | * handling the miss; we'll restart after it's handled. If we | ||
732 | * don't suspend, it's possible that this process could swap | ||
733 | * out and back in, and restart the engine since the DMA is | ||
734 | * still 'running'. | ||
735 | */ | ||
736 | if (fault_num == INT_DMATLB_MISS || | ||
737 | fault_num == INT_DMATLB_ACCESS || | ||
738 | fault_num == INT_DMATLB_MISS_DWNCL || | ||
739 | fault_num == INT_DMATLB_ACCESS_DWNCL) { | ||
740 | __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__SUSPEND_MASK); | ||
741 | while (__insn_mfspr(SPR_DMA_USER_STATUS) & | ||
742 | SPR_DMA_STATUS__BUSY_MASK) | ||
743 | ; | ||
744 | } | ||
745 | #endif | ||
746 | |||
747 | /* Validate fault num and decide if this is a first-time page fault. */ | ||
748 | switch (fault_num) { | ||
749 | case INT_ITLB_MISS: | ||
750 | case INT_DTLB_MISS: | ||
751 | #if CHIP_HAS_TILE_DMA() | ||
752 | case INT_DMATLB_MISS: | ||
753 | case INT_DMATLB_MISS_DWNCL: | ||
754 | #endif | ||
755 | #if CHIP_HAS_SN_PROC() | ||
756 | case INT_SNITLB_MISS: | ||
757 | case INT_SNITLB_MISS_DWNCL: | ||
758 | #endif | ||
759 | is_page_fault = 1; | ||
760 | break; | ||
761 | |||
762 | case INT_DTLB_ACCESS: | ||
763 | #if CHIP_HAS_TILE_DMA() | ||
764 | case INT_DMATLB_ACCESS: | ||
765 | case INT_DMATLB_ACCESS_DWNCL: | ||
766 | #endif | ||
767 | is_page_fault = 0; | ||
768 | break; | ||
769 | |||
770 | default: | ||
771 | panic("Bad fault number %d in do_page_fault", fault_num); | ||
772 | } | ||
773 | |||
774 | if (EX1_PL(regs->ex1) != USER_PL) { | ||
775 | struct async_tlb *async; | ||
776 | switch (fault_num) { | ||
777 | #if CHIP_HAS_TILE_DMA() | ||
778 | case INT_DMATLB_MISS: | ||
779 | case INT_DMATLB_ACCESS: | ||
780 | case INT_DMATLB_MISS_DWNCL: | ||
781 | case INT_DMATLB_ACCESS_DWNCL: | ||
782 | async = &current->thread.dma_async_tlb; | ||
783 | break; | ||
784 | #endif | ||
785 | #if CHIP_HAS_SN_PROC() | ||
786 | case INT_SNITLB_MISS: | ||
787 | case INT_SNITLB_MISS_DWNCL: | ||
788 | async = &current->thread.sn_async_tlb; | ||
789 | break; | ||
790 | #endif | ||
791 | default: | ||
792 | async = NULL; | ||
793 | } | ||
794 | if (async) { | ||
795 | |||
796 | /* | ||
797 | * No vmalloc check required, so we can allow | ||
798 | * interrupts immediately at this point. | ||
799 | */ | ||
800 | local_irq_enable(); | ||
801 | |||
802 | set_thread_flag(TIF_ASYNC_TLB); | ||
803 | if (async->fault_num != 0) { | ||
804 | panic("Second async fault %d;" | ||
805 | " old fault was %d (%#lx/%ld)", | ||
806 | fault_num, async->fault_num, | ||
807 | address, write); | ||
808 | } | ||
809 | BUG_ON(fault_num == 0); | ||
810 | async->fault_num = fault_num; | ||
811 | async->is_fault = is_page_fault; | ||
812 | async->is_write = write; | ||
813 | async->address = address; | ||
814 | return; | ||
815 | } | ||
816 | } | ||
817 | |||
818 | handle_page_fault(regs, fault_num, is_page_fault, address, write); | ||
819 | } | ||
820 | |||
821 | |||
822 | #if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() | ||
823 | /* | ||
824 | * Check an async_tlb structure to see if a deferred fault is waiting, | ||
825 | * and if so pass it to the page-fault code. | ||
826 | */ | ||
827 | static void handle_async_page_fault(struct pt_regs *regs, | ||
828 | struct async_tlb *async) | ||
829 | { | ||
830 | if (async->fault_num) { | ||
831 | /* | ||
832 | * Clear async->fault_num before calling the page-fault | ||
833 | * handler so that if we re-interrupt before returning | ||
834 | * from the function we have somewhere to put the | ||
835 | * information from the new interrupt. | ||
836 | */ | ||
837 | int fault_num = async->fault_num; | ||
838 | async->fault_num = 0; | ||
839 | handle_page_fault(regs, fault_num, async->is_fault, | ||
840 | async->address, async->is_write); | ||
841 | } | ||
842 | } | ||
843 | #endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */ | ||
844 | |||
845 | |||
846 | /* | ||
847 | * This routine effectively re-issues asynchronous page faults | ||
848 | * when we are returning to user space. | ||
849 | */ | ||
850 | void do_async_page_fault(struct pt_regs *regs) | ||
851 | { | ||
852 | /* | ||
853 | * Clear thread flag early. If we re-interrupt while processing | ||
854 | * code here, we will reset it and recall this routine before | ||
855 | * returning to user space. | ||
856 | */ | ||
857 | clear_thread_flag(TIF_ASYNC_TLB); | ||
858 | |||
859 | #if CHIP_HAS_TILE_DMA() | ||
860 | handle_async_page_fault(regs, &current->thread.dma_async_tlb); | ||
861 | #endif | ||
862 | #if CHIP_HAS_SN_PROC() | ||
863 | handle_async_page_fault(regs, &current->thread.sn_async_tlb); | ||
864 | #endif | ||
865 | } | ||
866 | |||
867 | void vmalloc_sync_all(void) | ||
868 | { | ||
869 | #ifdef __tilegx__ | ||
870 | /* Currently all L1 kernel pmd's are static and shared. */ | ||
871 | BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START)); | ||
872 | #else | ||
873 | /* | ||
874 | * Note that races in the updates of insync and start aren't | ||
875 | * problematic: insync can only get set bits added, and updates to | ||
876 | * start are only improving performance (without affecting correctness | ||
877 | * if undone). | ||
878 | */ | ||
879 | static DECLARE_BITMAP(insync, PTRS_PER_PGD); | ||
880 | static unsigned long start = PAGE_OFFSET; | ||
881 | unsigned long address; | ||
882 | |||
883 | BUILD_BUG_ON(PAGE_OFFSET & ~PGDIR_MASK); | ||
884 | for (address = start; address >= PAGE_OFFSET; address += PGDIR_SIZE) { | ||
885 | if (!test_bit(pgd_index(address), insync)) { | ||
886 | unsigned long flags; | ||
887 | struct list_head *pos; | ||
888 | |||
889 | spin_lock_irqsave(&pgd_lock, flags); | ||
890 | list_for_each(pos, &pgd_list) | ||
891 | if (!vmalloc_sync_one(list_to_pgd(pos), | ||
892 | address)) { | ||
893 | /* Must be at first entry in list. */ | ||
894 | BUG_ON(pos != pgd_list.next); | ||
895 | break; | ||
896 | } | ||
897 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
898 | if (pos != pgd_list.next) | ||
899 | set_bit(pgd_index(address), insync); | ||
900 | } | ||
901 | if (address == start && test_bit(pgd_index(address), insync)) | ||
902 | start = address + PGDIR_SIZE; | ||
903 | } | ||
904 | #endif | ||
905 | } | ||
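/*
 * For reference, the deferred-fault state that do_page_fault() stashes
 * away for DMA/SN faults taken in kernel mode looks roughly like this
 * (an illustrative sketch; see <asm/processor.h> for the real layout):
 *
 *	struct async_tlb {
 *		short fault_num;	// 0 when no deferred fault is pending
 *		char is_fault;		// TLB miss vs. access violation
 *		char is_write;		// was the access a write?
 *		unsigned long address;	// faulting address
 *	};
 *
 * do_async_page_fault() above re-issues any such pending fault via
 * handle_page_fault() when returning to user space.
 */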
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c new file mode 100644 index 000000000000..1fcecc5b9e03 --- /dev/null +++ b/arch/tile/mm/highmem.c | |||
@@ -0,0 +1,328 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/highmem.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/pagemap.h> | ||
18 | #include <asm/homecache.h> | ||
19 | |||
20 | #define kmap_get_pte(vaddr) \ | ||
21 | pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\ | ||
22 | (vaddr)), (vaddr)) | ||
23 | |||
24 | |||
25 | void *kmap(struct page *page) | ||
26 | { | ||
27 | void *kva; | ||
28 | unsigned long flags; | ||
29 | pte_t *ptep; | ||
30 | |||
31 | might_sleep(); | ||
32 | if (!PageHighMem(page)) | ||
33 | return page_address(page); | ||
34 | kva = kmap_high(page); | ||
35 | |||
36 | /* | ||
37 | * Rewrite the PTE under the lock. This ensures that the page | ||
38 | * is not currently migrating. | ||
39 | */ | ||
40 | ptep = kmap_get_pte((unsigned long)kva); | ||
41 | flags = homecache_kpte_lock(); | ||
42 | set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page))); | ||
43 | homecache_kpte_unlock(flags); | ||
44 | |||
45 | return kva; | ||
46 | } | ||
47 | EXPORT_SYMBOL(kmap); | ||
48 | |||
49 | void kunmap(struct page *page) | ||
50 | { | ||
51 | if (in_interrupt()) | ||
52 | BUG(); | ||
53 | if (!PageHighMem(page)) | ||
54 | return; | ||
55 | kunmap_high(page); | ||
56 | } | ||
57 | EXPORT_SYMBOL(kunmap); | ||
58 | |||
59 | static void debug_kmap_atomic_prot(enum km_type type) | ||
60 | { | ||
61 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
62 | static unsigned warn_count = 10; | ||
63 | |||
64 | if (unlikely(warn_count == 0)) | ||
65 | return; | ||
66 | |||
67 | if (unlikely(in_interrupt())) { | ||
68 | if (in_irq()) { | ||
69 | if (type != KM_IRQ0 && type != KM_IRQ1 && | ||
70 | type != KM_BIO_SRC_IRQ && | ||
71 | /* type != KM_BIO_DST_IRQ && */ | ||
72 | type != KM_BOUNCE_READ) { | ||
73 | WARN_ON(1); | ||
74 | warn_count--; | ||
75 | } | ||
76 | } else if (!irqs_disabled()) { /* softirq */ | ||
77 | if (type != KM_IRQ0 && type != KM_IRQ1 && | ||
78 | type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 && | ||
79 | type != KM_SKB_SUNRPC_DATA && | ||
80 | type != KM_SKB_DATA_SOFTIRQ && | ||
81 | type != KM_BOUNCE_READ) { | ||
82 | WARN_ON(1); | ||
83 | warn_count--; | ||
84 | } | ||
85 | } | ||
86 | } | ||
87 | |||
88 | if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ || | ||
89 | type == KM_BIO_SRC_IRQ /* || type == KM_BIO_DST_IRQ */) { | ||
90 | if (!irqs_disabled()) { | ||
91 | WARN_ON(1); | ||
92 | warn_count--; | ||
93 | } | ||
94 | } else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) { | ||
95 | if (irq_count() == 0 && !irqs_disabled()) { | ||
96 | WARN_ON(1); | ||
97 | warn_count--; | ||
98 | } | ||
99 | } | ||
100 | #endif | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * Describe a single atomic mapping of a page on a given cpu at a | ||
105 | * given address, and allow it to be linked into a list. | ||
106 | */ | ||
107 | struct atomic_mapped_page { | ||
108 | struct list_head list; | ||
109 | struct page *page; | ||
110 | int cpu; | ||
111 | unsigned long va; | ||
112 | }; | ||
113 | |||
114 | static spinlock_t amp_lock = __SPIN_LOCK_UNLOCKED(&amp_lock); | ||
115 | static struct list_head amp_list = LIST_HEAD_INIT(amp_list); | ||
116 | |||
117 | /* | ||
118 | * Combining this structure with a per-cpu declaration lets us give | ||
119 | * each cpu an atomic_mapped_page structure per type. | ||
120 | */ | ||
121 | struct kmap_amps { | ||
122 | struct atomic_mapped_page per_type[KM_TYPE_NR]; | ||
123 | }; | ||
124 | DEFINE_PER_CPU(struct kmap_amps, amps); | ||
125 | |||
126 | /* | ||
127 | * Add a page and va, on this cpu, to the list of kmap_atomic pages, | ||
128 | * and write the new pte to memory. Writing the new PTE under the | ||
129 | * lock guarantees that it is either on the list before migration starts | ||
130 | * (if we won the race), or set_pte() sets the migrating bit in the PTE | ||
131 | * (if we lost the race). And doing it under the lock guarantees | ||
132 | * that when kmap_atomic_fix_one_kpte() comes along, it finds a valid | ||
133 | * PTE in memory, iff the mapping is still on the amp_list. | ||
134 | * | ||
135 | * Finally, doing it under the lock lets us safely examine the page | ||
136 | * to see if it is immutable or not, for the generic kmap_atomic() case. | ||
137 | * If we examine it earlier we are exposed to a race where it looks | ||
138 | * writable earlier, but becomes immutable before we write the PTE. | ||
139 | */ | ||
140 | static void kmap_atomic_register(struct page *page, enum km_type type, | ||
141 | unsigned long va, pte_t *ptep, pte_t pteval) | ||
142 | { | ||
143 | unsigned long flags; | ||
144 | struct atomic_mapped_page *amp; | ||
145 | |||
146 | flags = homecache_kpte_lock(); | ||
147 | spin_lock(&amp_lock); | ||
148 | |||
149 | /* With interrupts disabled, now fill in the per-cpu info. */ | ||
150 | amp = &__get_cpu_var(amps).per_type[type]; | ||
151 | amp->page = page; | ||
152 | amp->cpu = smp_processor_id(); | ||
153 | amp->va = va; | ||
154 | |||
155 | /* For generic kmap_atomic(), choose the PTE writability now. */ | ||
156 | if (!pte_read(pteval)) | ||
157 | pteval = mk_pte(page, page_to_kpgprot(page)); | ||
158 | |||
159 | list_add(&amp->list, &amp_list); | ||
160 | set_pte(ptep, pteval); | ||
161 | arch_flush_lazy_mmu_mode(); | ||
162 | |||
163 | spin_unlock(&amp_lock); | ||
164 | homecache_kpte_unlock(flags); | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * Remove a page and va, on this cpu, from the list of kmap_atomic pages. | ||
169 | * Linear-time search, but we count on the lists being short. | ||
170 | * We don't need to adjust the PTE under the lock (as opposed to the | ||
171 | * kmap_atomic_register() case), since we're just unconditionally | ||
172 | * zeroing the PTE after it's off the list. | ||
173 | */ | ||
174 | static void kmap_atomic_unregister(struct page *page, unsigned long va) | ||
175 | { | ||
176 | unsigned long flags; | ||
177 | struct atomic_mapped_page *amp; | ||
178 | int cpu = smp_processor_id(); | ||
179 | spin_lock_irqsave(&amp_lock, flags); | ||
180 | list_for_each_entry(amp, &amp_list, list) { | ||
181 | if (amp->page == page && amp->cpu == cpu && amp->va == va) | ||
182 | break; | ||
183 | } | ||
184 | BUG_ON(&amp->list == &amp_list); | ||
185 | list_del(&amp->list); | ||
186 | spin_unlock_irqrestore(&amp_lock, flags); | ||
187 | } | ||
188 | |||
189 | /* Helper routine for kmap_atomic_fix_kpte(), below. */ | ||
190 | static void kmap_atomic_fix_one_kpte(struct atomic_mapped_page *amp, | ||
191 | int finished) | ||
192 | { | ||
193 | pte_t *ptep = kmap_get_pte(amp->va); | ||
194 | if (!finished) { | ||
195 | set_pte(ptep, pte_mkmigrate(*ptep)); | ||
196 | flush_remote(0, 0, NULL, amp->va, PAGE_SIZE, PAGE_SIZE, | ||
197 | cpumask_of(amp->cpu), NULL, 0); | ||
198 | } else { | ||
199 | /* | ||
200 | * Rewrite a default kernel PTE for this page. | ||
201 | * We rely on the fact that set_pte() writes the | ||
202 | * present+migrating bits last. | ||
203 | */ | ||
204 | pte_t pte = mk_pte(amp->page, page_to_kpgprot(amp->page)); | ||
205 | set_pte(ptep, pte); | ||
206 | } | ||
207 | } | ||
208 | |||
209 | /* | ||
210 | * This routine is a helper function for homecache_fix_kpte(); see | ||
211 | * its comments for more information on the "finished" argument here. | ||
212 | * | ||
213 | * Note that we hold the lock while doing the remote flushes, which | ||
214 | * will stall any unrelated cpus trying to do kmap_atomic operations. | ||
215 | * We could just update the PTEs under the lock, and save away copies | ||
216 | * of the structs (or just the va+cpu), then flush them after we | ||
217 | * release the lock, but it seems easier just to do it all under the lock. | ||
218 | */ | ||
219 | void kmap_atomic_fix_kpte(struct page *page, int finished) | ||
220 | { | ||
221 | struct atomic_mapped_page *amp; | ||
222 | unsigned long flags; | ||
223 | spin_lock_irqsave(&amp_lock, flags); | ||
224 | list_for_each_entry(amp, &amp_list, list) { | ||
225 | if (amp->page == page) | ||
226 | kmap_atomic_fix_one_kpte(amp, finished); | ||
227 | } | ||
228 | spin_unlock_irqrestore(&amp_lock, flags); | ||
229 | } | ||
230 | |||
231 | /* | ||
232 | * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap | ||
233 | * because the kmap code must perform a global TLB invalidation when | ||
234 | * the kmap pool wraps. | ||
235 | * | ||
236 | * Note that they may be slower than on x86 (etc.) because unlike on | ||
237 | * those platforms, we do have to take a global lock to map and unmap | ||
238 | * pages on Tile (see above). | ||
239 | * | ||
240 | * When holding an atomic kmap it is not legal to sleep, so atomic | ||
241 | * kmaps are appropriate for short, tight code paths only. | ||
242 | */ | ||
243 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) | ||
244 | { | ||
245 | enum fixed_addresses idx; | ||
246 | unsigned long vaddr; | ||
247 | pte_t *pte; | ||
248 | |||
249 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ | ||
250 | pagefault_disable(); | ||
251 | |||
252 | /* Avoid icache flushes by disallowing atomic executable mappings. */ | ||
253 | BUG_ON(pte_exec(prot)); | ||
254 | |||
255 | if (!PageHighMem(page)) | ||
256 | return page_address(page); | ||
257 | |||
258 | debug_kmap_atomic_prot(type); | ||
259 | |||
260 | idx = type + KM_TYPE_NR*smp_processor_id(); | ||
261 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
262 | pte = kmap_get_pte(vaddr); | ||
263 | BUG_ON(!pte_none(*pte)); | ||
264 | |||
265 | /* Register that this page is mapped atomically on this cpu. */ | ||
266 | kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot)); | ||
267 | |||
268 | return (void *)vaddr; | ||
269 | } | ||
270 | EXPORT_SYMBOL(kmap_atomic_prot); | ||
271 | |||
272 | void *kmap_atomic(struct page *page, enum km_type type) | ||
273 | { | ||
274 | /* PAGE_NONE is a magic value that tells us to check immutability. */ | ||
275 | return kmap_atomic_prot(page, type, PAGE_NONE); | ||
276 | } | ||
277 | EXPORT_SYMBOL(kmap_atomic); | ||
278 | |||
279 | void kunmap_atomic(void *kvaddr, enum km_type type) | ||
280 | { | ||
281 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | ||
282 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | ||
283 | |||
284 | /* | ||
285 | * Force other mappings to Oops if they try to access this pte without | ||
286 | * first remapping it. Keeping stale mappings around is a bad idea. | ||
287 | */ | ||
288 | if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) { | ||
289 | pte_t *pte = kmap_get_pte(vaddr); | ||
290 | pte_t pteval = *pte; | ||
291 | BUG_ON(!pte_present(pteval) && !pte_migrating(pteval)); | ||
292 | kmap_atomic_unregister(pte_page(pteval), vaddr); | ||
293 | kpte_clear_flush(pte, vaddr); | ||
294 | } else { | ||
295 | /* Must be a lowmem page */ | ||
296 | BUG_ON(vaddr < PAGE_OFFSET); | ||
297 | BUG_ON(vaddr >= (unsigned long)high_memory); | ||
298 | } | ||
299 | |||
300 | arch_flush_lazy_mmu_mode(); | ||
301 | pagefault_enable(); | ||
302 | } | ||
303 | EXPORT_SYMBOL(kunmap_atomic); | ||
304 | |||
305 | /* | ||
306 | * This API is supposed to allow us to map memory without a "struct page". | ||
307 | * Currently we don't support this, though this may change in the future. | ||
308 | */ | ||
309 | void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) | ||
310 | { | ||
311 | return kmap_atomic(pfn_to_page(pfn), type); | ||
312 | } | ||
313 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot) | ||
314 | { | ||
315 | return kmap_atomic_prot(pfn_to_page(pfn), type, prot); | ||
316 | } | ||
317 | |||
318 | struct page *kmap_atomic_to_page(void *ptr) | ||
319 | { | ||
320 | pte_t *pte; | ||
321 | unsigned long vaddr = (unsigned long)ptr; | ||
322 | |||
323 | if (vaddr < FIXADDR_START) | ||
324 | return virt_to_page(ptr); | ||
325 | |||
326 | pte = kmap_get_pte(vaddr); | ||
327 | return pte_page(*pte); | ||
328 | } | ||
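/*
 * Typical use of the atomic kmap API above, e.g. zeroing a highmem page
 * from a context that cannot sleep (an illustrative sketch only; KM_USER0
 * is the conventional slot for this kind of short-lived mapping):
 *
 *	void *va = kmap_atomic(page, KM_USER0);
 *	memset(va, 0, PAGE_SIZE);
 *	kunmap_atomic(va, KM_USER0);
 *
 * The map and unmap must happen on the same cpu without sleeping in
 * between, which kmap_atomic() itself arranges via pagefault_disable().
 */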
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c new file mode 100644 index 000000000000..52feb77133ce --- /dev/null +++ b/arch/tile/mm/homecache.c | |||
@@ -0,0 +1,445 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This code maintains the "home" for each page in the system. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/bootmem.h> | ||
22 | #include <linux/rmap.h> | ||
23 | #include <linux/pagemap.h> | ||
24 | #include <linux/mutex.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/sysctl.h> | ||
27 | #include <linux/pagevec.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/timex.h> | ||
30 | #include <linux/cache.h> | ||
31 | #include <linux/smp.h> | ||
32 | |||
33 | #include <asm/page.h> | ||
34 | #include <asm/sections.h> | ||
35 | #include <asm/tlbflush.h> | ||
36 | #include <asm/pgalloc.h> | ||
37 | #include <asm/homecache.h> | ||
38 | |||
39 | #include "migrate.h" | ||
40 | |||
41 | |||
42 | #if CHIP_HAS_COHERENT_LOCAL_CACHE() | ||
43 | |||
44 | /* | ||
45 | * The noallocl2 option suppresses all use of the L2 cache to cache | ||
46 | * locally from a remote home. There's no point in using it if we | ||
47 | * don't have coherent local caching, though. | ||
48 | */ | ||
49 | int __write_once noallocl2; | ||
50 | static int __init set_noallocl2(char *str) | ||
51 | { | ||
52 | noallocl2 = 1; | ||
53 | return 0; | ||
54 | } | ||
55 | early_param("noallocl2", set_noallocl2); | ||
56 | |||
57 | #else | ||
58 | |||
59 | #define noallocl2 0 | ||
60 | |||
61 | #endif | ||
62 | |||
63 | |||
64 | |||
65 | /* Provide no-op versions of these routines to keep flush_remote() cleaner. */ | ||
66 | #define mark_caches_evicted_start() 0 | ||
67 | #define mark_caches_evicted_finish(mask, timestamp) do {} while (0) | ||
68 | |||
69 | |||
70 | |||
71 | |||
72 | /* | ||
73 | * Update the irq_stat for cpus that we are going to interrupt | ||
74 | * with TLB or cache flushes. Also handle removing dataplane cpus | ||
75 | * from the TLB flush set, and setting dataplane_tlb_state instead. | ||
76 | */ | ||
77 | static void hv_flush_update(const struct cpumask *cache_cpumask, | ||
78 | struct cpumask *tlb_cpumask, | ||
79 | unsigned long tlb_va, unsigned long tlb_length, | ||
80 | HV_Remote_ASID *asids, int asidcount) | ||
81 | { | ||
82 | struct cpumask mask; | ||
83 | int i, cpu; | ||
84 | |||
85 | cpumask_clear(&mask); | ||
86 | if (cache_cpumask) | ||
87 | cpumask_or(&mask, &mask, cache_cpumask); | ||
88 | if (tlb_cpumask && tlb_length) { | ||
89 | cpumask_or(&mask, &mask, tlb_cpumask); | ||
90 | } | ||
91 | |||
92 | for (i = 0; i < asidcount; ++i) | ||
93 | cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask); | ||
94 | |||
95 | /* | ||
96 | * Don't bother to update atomically; losing a count | ||
97 | * here is not that critical. | ||
98 | */ | ||
99 | for_each_cpu(cpu, &mask) | ||
100 | ++per_cpu(irq_stat, cpu).irq_hv_flush_count; | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * This wrapper function around hv_flush_remote() does several things: | ||
105 | * | ||
106 | * - Provides a return value error-checking panic path, since | ||
107 | * there's never any good reason for hv_flush_remote() to fail. | ||
108 | * - Accepts a 32-bit PFN rather than a 64-bit PA, which generally | ||
109 | * is the type that Linux wants to pass around anyway. | ||
110 | * - Centralizes the mark_caches_evicted() handling. | ||
111 | * - Canonicalizes that lengths of zero make cpumasks NULL. | ||
112 | * - Handles deferring TLB flushes for dataplane tiles. | ||
113 | * - Tracks remote interrupts in the per-cpu irq_cpustat_t. | ||
114 | * | ||
115 | * Note that we have to wait until the cache flush completes before | ||
116 | * updating the per-cpu last_cache_flush word, since otherwise another | ||
117 | * concurrent flush can race, conclude the flush has already | ||
118 | * completed, and start to use the page while it's still dirty | ||
119 | * remotely (running concurrently with the actual evict, presumably). | ||
120 | */ | ||
121 | void flush_remote(unsigned long cache_pfn, unsigned long cache_control, | ||
122 | const struct cpumask *cache_cpumask_orig, | ||
123 | HV_VirtAddr tlb_va, unsigned long tlb_length, | ||
124 | unsigned long tlb_pgsize, | ||
125 | const struct cpumask *tlb_cpumask_orig, | ||
126 | HV_Remote_ASID *asids, int asidcount) | ||
127 | { | ||
128 | int rc; | ||
129 | int timestamp = 0; /* happy compiler */ | ||
130 | struct cpumask cache_cpumask_copy, tlb_cpumask_copy; | ||
131 | struct cpumask *cache_cpumask, *tlb_cpumask; | ||
132 | HV_PhysAddr cache_pa; | ||
133 | char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5]; | ||
134 | |||
135 | mb(); /* provided just to simplify "magic hypervisor" mode */ | ||
136 | |||
137 | /* | ||
138 | * Canonicalize and copy the cpumasks. | ||
139 | */ | ||
140 | if (cache_cpumask_orig && cache_control) { | ||
141 | cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig); | ||
142 | cache_cpumask = &cache_cpumask_copy; | ||
143 | } else { | ||
144 | cpumask_clear(&cache_cpumask_copy); | ||
145 | cache_cpumask = NULL; | ||
146 | } | ||
147 | if (cache_cpumask == NULL) | ||
148 | cache_control = 0; | ||
149 | if (tlb_cpumask_orig && tlb_length) { | ||
150 | cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig); | ||
151 | tlb_cpumask = &tlb_cpumask_copy; | ||
152 | } else { | ||
153 | cpumask_clear(&tlb_cpumask_copy); | ||
154 | tlb_cpumask = NULL; | ||
155 | } | ||
156 | |||
157 | hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length, | ||
158 | asids, asidcount); | ||
159 | cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT; | ||
160 | if (cache_control & HV_FLUSH_EVICT_L2) | ||
161 | timestamp = mark_caches_evicted_start(); | ||
162 | rc = hv_flush_remote(cache_pa, cache_control, | ||
163 | cpumask_bits(cache_cpumask), | ||
164 | tlb_va, tlb_length, tlb_pgsize, | ||
165 | cpumask_bits(tlb_cpumask), | ||
166 | asids, asidcount); | ||
167 | if (cache_control & HV_FLUSH_EVICT_L2) | ||
168 | mark_caches_evicted_finish(cache_cpumask, timestamp); | ||
169 | if (rc == 0) | ||
170 | return; | ||
171 | cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy); | ||
172 | cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy); | ||
173 | |||
174 | printk("hv_flush_remote(%#llx, %#lx, %p [%s]," | ||
175 | " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n", | ||
176 | cache_pa, cache_control, cache_cpumask, cache_buf, | ||
177 | (unsigned long)tlb_va, tlb_length, tlb_pgsize, | ||
178 | tlb_cpumask, tlb_buf, | ||
179 | asids, asidcount, rc); | ||
180 | if (asidcount > 0) { | ||
181 | int i; | ||
182 | printk(" asids:"); | ||
183 | for (i = 0; i < asidcount; ++i) | ||
184 | printk(" %d,%d,%d", | ||
185 | asids[i].x, asids[i].y, asids[i].asid); | ||
186 | printk("\n"); | ||
187 | } | ||
188 | panic("Unsafe to continue."); | ||
189 | } | ||
190 | |||
191 | void homecache_evict(const struct cpumask *mask) | ||
192 | { | ||
193 | flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0); | ||
194 | } | ||
195 | |||
196 | /* Return a mask of the cpus whose caches currently own these pages. */ | ||
197 | static void homecache_mask(struct page *page, int pages, | ||
198 | struct cpumask *home_mask) | ||
199 | { | ||
200 | int i; | ||
201 | cpumask_clear(home_mask); | ||
202 | for (i = 0; i < pages; ++i) { | ||
203 | int home = page_home(&page[i]); | ||
204 | if (home == PAGE_HOME_IMMUTABLE || | ||
205 | home == PAGE_HOME_INCOHERENT) { | ||
206 | cpumask_copy(home_mask, cpu_possible_mask); | ||
207 | return; | ||
208 | } | ||
209 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
210 | if (home == PAGE_HOME_HASH) { | ||
211 | cpumask_or(home_mask, home_mask, &hash_for_home_map); | ||
212 | continue; | ||
213 | } | ||
214 | #endif | ||
215 | if (home == PAGE_HOME_UNCACHED) | ||
216 | continue; | ||
217 | BUG_ON(home < 0 || home >= NR_CPUS); | ||
218 | cpumask_set_cpu(home, home_mask); | ||
219 | } | ||
220 | } | ||
221 | |||
222 | /* | ||
223 | * Return the passed length, or HV_FLUSH_EVICT_L2 if it's long | ||
224 | * enough that we believe we should just evict the whole L2 cache. | ||
225 | */ | ||
226 | static unsigned long cache_flush_length(unsigned long length) | ||
227 | { | ||
228 | return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length; | ||
229 | } | ||
230 | |||
231 | /* On the simulator, confirm lines have been evicted everywhere. */ | ||
232 | static void validate_lines_evicted(unsigned long pfn, size_t length) | ||
233 | { | ||
234 | sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, | ||
235 | (HV_PhysAddr)pfn << PAGE_SHIFT, length); | ||
236 | } | ||
237 | |||
238 | /* Flush a page out of whatever cache(s) it is in. */ | ||
239 | void homecache_flush_cache(struct page *page, int order) | ||
240 | { | ||
241 | int pages = 1 << order; | ||
242 | int length = cache_flush_length(pages * PAGE_SIZE); | ||
243 | unsigned long pfn = page_to_pfn(page); | ||
244 | struct cpumask home_mask; | ||
245 | |||
246 | homecache_mask(page, pages, &home_mask); | ||
247 | flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0); | ||
248 | validate_lines_evicted(pfn, pages * PAGE_SIZE); | ||
249 | } | ||
250 | |||
251 | |||
252 | /* Report the home corresponding to a given PTE. */ | ||
253 | static int pte_to_home(pte_t pte) | ||
254 | { | ||
255 | if (hv_pte_get_nc(pte)) | ||
256 | return PAGE_HOME_IMMUTABLE; | ||
257 | switch (hv_pte_get_mode(pte)) { | ||
258 | case HV_PTE_MODE_CACHE_TILE_L3: | ||
259 | return get_remote_cache_cpu(pte); | ||
260 | case HV_PTE_MODE_CACHE_NO_L3: | ||
261 | return PAGE_HOME_INCOHERENT; | ||
262 | case HV_PTE_MODE_UNCACHED: | ||
263 | return PAGE_HOME_UNCACHED; | ||
264 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
265 | case HV_PTE_MODE_CACHE_HASH_L3: | ||
266 | return PAGE_HOME_HASH; | ||
267 | #endif | ||
268 | } | ||
269 | panic("Bad PTE %#llx\n", pte.val); | ||
270 | } | ||
271 | |||
272 | /* Update the home of a PTE if necessary (can also be used for a pgprot_t). */ | ||
273 | pte_t pte_set_home(pte_t pte, int home) | ||
274 | { | ||
275 | /* Check for non-linear file mapping "PTEs" and pass them through. */ | ||
276 | if (pte_file(pte)) | ||
277 | return pte; | ||
278 | |||
279 | #if CHIP_HAS_MMIO() | ||
280 | /* Check for MMIO mappings and pass them through. */ | ||
281 | if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO) | ||
282 | return pte; | ||
283 | #endif | ||
284 | |||
285 | |||
286 | /* | ||
287 | * Only immutable pages get NC mappings. If we have a | ||
288 | * non-coherent PTE, but the underlying page is not | ||
289 | * immutable, it's likely the result of a forced | ||
290 | * caching setting running up against ptrace setting | ||
291 | * the page to be writable underneath. In this case, | ||
292 | * just keep the PTE coherent. | ||
293 | */ | ||
294 | if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) { | ||
295 | pte = hv_pte_clear_nc(pte); | ||
296 | printk("non-immutable page incoherently referenced: %#llx\n", | ||
297 | pte.val); | ||
298 | } | ||
299 | |||
300 | switch (home) { | ||
301 | |||
302 | case PAGE_HOME_UNCACHED: | ||
303 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED); | ||
304 | break; | ||
305 | |||
306 | case PAGE_HOME_INCOHERENT: | ||
307 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); | ||
308 | break; | ||
309 | |||
310 | case PAGE_HOME_IMMUTABLE: | ||
311 | /* | ||
312 | * We could home this page anywhere, since it's immutable, | ||
313 | * but by default just home it to follow "hash_default". | ||
314 | */ | ||
315 | BUG_ON(hv_pte_get_writable(pte)); | ||
316 | if (pte_get_forcecache(pte)) { | ||
317 | /* Upgrade "force any cpu" to "No L3" for immutable. */ | ||
318 | if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3 | ||
319 | && pte_get_anyhome(pte)) { | ||
320 | pte = hv_pte_set_mode(pte, | ||
321 | HV_PTE_MODE_CACHE_NO_L3); | ||
322 | } | ||
323 | } else | ||
324 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
325 | if (hash_default) | ||
326 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3); | ||
327 | else | ||
328 | #endif | ||
329 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); | ||
330 | pte = hv_pte_set_nc(pte); | ||
331 | break; | ||
332 | |||
333 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
334 | case PAGE_HOME_HASH: | ||
335 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3); | ||
336 | break; | ||
337 | #endif | ||
338 | |||
339 | default: | ||
340 | BUG_ON(home < 0 || home >= NR_CPUS || | ||
341 | !cpu_is_valid_lotar(home)); | ||
342 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); | ||
343 | pte = set_remote_cache_cpu(pte, home); | ||
344 | break; | ||
345 | } | ||
346 | |||
347 | #if CHIP_HAS_NC_AND_NOALLOC_BITS() | ||
348 | if (noallocl2) | ||
349 | pte = hv_pte_set_no_alloc_l2(pte); | ||
350 | |||
351 | /* Simplify "no local and no l3" to "uncached" */ | ||
352 | if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) && | ||
353 | hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) { | ||
354 | pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED); | ||
355 | } | ||
356 | #endif | ||
357 | |||
358 | /* Checking this case here gives a better panic than from the hv. */ | ||
359 | BUG_ON(hv_pte_get_mode(pte) == 0); | ||
360 | |||
361 | return pte; | ||
362 | } | ||
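As a hedged illustration of the pgprot usage noted in the comment above (the helper name is hypothetical; the same pattern appears in construct_pgprot() in arch/tile/mm/init.c later in this patch):

/* Hypothetical example: build a kernel pgprot whose pages are homed per "home". */
static pgprot_t example_homed_pgprot(int home)
{
	pgprot_t prot = PAGE_KERNEL;

	/* pte_set_home() also accepts a pgprot_t; "home" may be a cpu
	 * number or one of the PAGE_HOME_* constants handled above. */
	prot = pte_set_home(prot, home);
	return prot;
}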
363 | |||
364 | /* | ||
365 | * The routines in this section are the "static" versions of the normal | ||
366 | * dynamic homecaching routines; they just set the home cache | ||
367 | * of a kernel page once, and require a full-chip cache/TLB flush, | ||
368 | * so they're not suitable for anything but infrequent use. | ||
369 | */ | ||
370 | |||
371 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
372 | static inline int initial_page_home(void) { return PAGE_HOME_HASH; } | ||
373 | #else | ||
374 | static inline int initial_page_home(void) { return 0; } | ||
375 | #endif | ||
376 | |||
377 | int page_home(struct page *page) | ||
378 | { | ||
379 | if (PageHighMem(page)) { | ||
380 | return initial_page_home(); | ||
381 | } else { | ||
382 | unsigned long kva = (unsigned long)page_address(page); | ||
383 | return pte_to_home(*virt_to_pte(NULL, kva)); | ||
384 | } | ||
385 | } | ||
386 | |||
387 | void homecache_change_page_home(struct page *page, int order, int home) | ||
388 | { | ||
389 | int i, pages = (1 << order); | ||
390 | unsigned long kva; | ||
391 | |||
392 | BUG_ON(PageHighMem(page)); | ||
393 | BUG_ON(page_count(page) > 1); | ||
394 | BUG_ON(page_mapcount(page) != 0); | ||
395 | kva = (unsigned long) page_address(page); | ||
396 | flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map, | ||
397 | kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask, | ||
398 | NULL, 0); | ||
399 | |||
400 | for (i = 0; i < pages; ++i, kva += PAGE_SIZE) { | ||
401 | pte_t *ptep = virt_to_pte(NULL, kva); | ||
402 | pte_t pteval = *ptep; | ||
403 | BUG_ON(!pte_present(pteval) || pte_huge(pteval)); | ||
404 | *ptep = pte_set_home(pteval, home); | ||
405 | } | ||
406 | } | ||
407 | |||
408 | struct page *homecache_alloc_pages(gfp_t gfp_mask, | ||
409 | unsigned int order, int home) | ||
410 | { | ||
411 | struct page *page; | ||
412 | BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */ | ||
413 | page = alloc_pages(gfp_mask, order); | ||
414 | if (page) | ||
415 | homecache_change_page_home(page, order, home); | ||
416 | return page; | ||
417 | } | ||
418 | |||
419 | struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, | ||
420 | unsigned int order, int home) | ||
421 | { | ||
422 | struct page *page; | ||
423 | BUG_ON(gfp_mask & __GFP_HIGHMEM); /* must be lowmem */ | ||
424 | page = alloc_pages_node(nid, gfp_mask, order); | ||
425 | if (page) | ||
426 | homecache_change_page_home(page, order, home); | ||
427 | return page; | ||
428 | } | ||
429 | |||
430 | void homecache_free_pages(unsigned long addr, unsigned int order) | ||
431 | { | ||
432 | struct page *page; | ||
433 | |||
434 | if (addr == 0) | ||
435 | return; | ||
436 | |||
437 | VM_BUG_ON(!virt_addr_valid((void *)addr)); | ||
438 | page = virt_to_page((void *)addr); | ||
439 | if (put_page_testzero(page)) { | ||
440 | int pages = (1 << order); | ||
441 | homecache_change_page_home(page, order, initial_page_home()); | ||
442 | while (pages--) | ||
443 | __free_page(page++); | ||
444 | } | ||
445 | } | ||
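A hedged usage sketch of the allocators defined above (function names and the GFP choice are illustrative only; assumes <linux/gfp.h> and <asm/homecache.h> are included):

/* Hypothetical driver helpers: allocate and free a buffer homed on "home". */
static void *example_alloc_homed_buffer(int home)
{
	/* One lowmem page whose cache home is "home" (a cpu number, or
	 * PAGE_HOME_HASH where hash-for-home is available). */
	struct page *page = homecache_alloc_pages(GFP_KERNEL, 0, home);

	return page ? page_address(page) : NULL;
}

static void example_free_homed_buffer(void *buf)
{
	/* homecache_free_pages() restores the default home before freeing;
	 * a zero address is ignored, so NULL is safe here. */
	homecache_free_pages((unsigned long)buf, 0);
}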
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c new file mode 100644 index 000000000000..c38570f8f0d0 --- /dev/null +++ b/arch/tile/mm/hugetlbpage.c | |||
@@ -0,0 +1,343 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * TILE Huge TLB Page Support for Kernel. | ||
15 | * Taken from i386 hugetlb implementation: | ||
16 | * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com> | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/hugetlb.h> | ||
23 | #include <linux/pagemap.h> | ||
24 | #include <linux/smp_lock.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <linux/sysctl.h> | ||
28 | #include <linux/mman.h> | ||
29 | #include <asm/tlb.h> | ||
30 | #include <asm/tlbflush.h> | ||
31 | |||
32 | pte_t *huge_pte_alloc(struct mm_struct *mm, | ||
33 | unsigned long addr, unsigned long sz) | ||
34 | { | ||
35 | pgd_t *pgd; | ||
36 | pud_t *pud; | ||
37 | pte_t *pte = NULL; | ||
38 | |||
39 | /* We do not yet support multiple huge page sizes. */ | ||
40 | BUG_ON(sz != PMD_SIZE); | ||
41 | |||
42 | pgd = pgd_offset(mm, addr); | ||
43 | pud = pud_alloc(mm, pgd, addr); | ||
44 | if (pud) | ||
45 | pte = (pte_t *) pmd_alloc(mm, pud, addr); | ||
46 | BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); | ||
47 | |||
48 | return pte; | ||
49 | } | ||
50 | |||
51 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | ||
52 | { | ||
53 | pgd_t *pgd; | ||
54 | pud_t *pud; | ||
55 | pmd_t *pmd = NULL; | ||
56 | |||
57 | pgd = pgd_offset(mm, addr); | ||
58 | if (pgd_present(*pgd)) { | ||
59 | pud = pud_offset(pgd, addr); | ||
60 | if (pud_present(*pud)) | ||
61 | pmd = pmd_offset(pud, addr); | ||
62 | } | ||
63 | return (pte_t *) pmd; | ||
64 | } | ||
65 | |||
66 | #ifdef HUGETLB_TEST | ||
67 | struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, | ||
68 | int write) | ||
69 | { | ||
70 | unsigned long vpfn = address >> PAGE_SHIFT; | ||
71 | pte_t *pte; | ||
72 | struct page *page; | ||
73 | struct vm_area_struct *vma; | ||
75 | |||
76 | vma = find_vma(mm, address); | ||
77 | if (!vma || !is_vm_hugetlb_page(vma)) | ||
78 | return ERR_PTR(-EINVAL); | ||
79 | |||
80 | pte = huge_pte_offset(mm, address); | ||
81 | |||
82 | /* hugetlb should be locked, and hence, prefaulted */ | ||
83 | WARN_ON(!pte || pte_none(*pte)); | ||
84 | |||
85 | page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)]; | ||
86 | |||
87 | WARN_ON(!PageHead(page)); | ||
88 | |||
89 | return page; | ||
90 | } | ||
91 | |||
92 | int pmd_huge(pmd_t pmd) | ||
93 | { | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | int pud_huge(pud_t pud) | ||
98 | { | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, | ||
103 | pmd_t *pmd, int write) | ||
104 | { | ||
105 | return NULL; | ||
106 | } | ||
107 | |||
108 | #else | ||
109 | |||
110 | struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, | ||
111 | int write) | ||
112 | { | ||
113 | return ERR_PTR(-EINVAL); | ||
114 | } | ||
115 | |||
116 | int pmd_huge(pmd_t pmd) | ||
117 | { | ||
118 | return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE); | ||
119 | } | ||
120 | |||
121 | int pud_huge(pud_t pud) | ||
122 | { | ||
123 | return !!(pud_val(pud) & _PAGE_HUGE_PAGE); | ||
124 | } | ||
125 | |||
126 | struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address, | ||
127 | pmd_t *pmd, int write) | ||
128 | { | ||
129 | struct page *page; | ||
130 | |||
131 | page = pte_page(*(pte_t *)pmd); | ||
132 | if (page) | ||
133 | page += ((address & ~PMD_MASK) >> PAGE_SHIFT); | ||
134 | return page; | ||
135 | } | ||
136 | |||
137 | struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address, | ||
138 | pud_t *pud, int write) | ||
139 | { | ||
140 | struct page *page; | ||
141 | |||
142 | page = pte_page(*(pte_t *)pud); | ||
143 | if (page) | ||
144 | page += ((address & ~PUD_MASK) >> PAGE_SHIFT); | ||
145 | return page; | ||
146 | } | ||
147 | |||
148 | int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep) | ||
149 | { | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | #endif | ||
154 | |||
155 | #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
156 | static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, | ||
157 | unsigned long addr, unsigned long len, | ||
158 | unsigned long pgoff, unsigned long flags) | ||
159 | { | ||
160 | struct hstate *h = hstate_file(file); | ||
161 | struct mm_struct *mm = current->mm; | ||
162 | struct vm_area_struct *vma; | ||
163 | unsigned long start_addr; | ||
164 | |||
165 | if (len > mm->cached_hole_size) { | ||
166 | start_addr = mm->free_area_cache; | ||
167 | } else { | ||
168 | start_addr = TASK_UNMAPPED_BASE; | ||
169 | mm->cached_hole_size = 0; | ||
170 | } | ||
171 | |||
172 | full_search: | ||
173 | addr = ALIGN(start_addr, huge_page_size(h)); | ||
174 | |||
175 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { | ||
176 | /* At this point: (!vma || addr < vma->vm_end). */ | ||
177 | if (TASK_SIZE - len < addr) { | ||
178 | /* | ||
179 | * Start a new search - just in case we missed | ||
180 | * some holes. | ||
181 | */ | ||
182 | if (start_addr != TASK_UNMAPPED_BASE) { | ||
183 | start_addr = TASK_UNMAPPED_BASE; | ||
184 | mm->cached_hole_size = 0; | ||
185 | goto full_search; | ||
186 | } | ||
187 | return -ENOMEM; | ||
188 | } | ||
189 | if (!vma || addr + len <= vma->vm_start) { | ||
190 | mm->free_area_cache = addr + len; | ||
191 | return addr; | ||
192 | } | ||
193 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
194 | mm->cached_hole_size = vma->vm_start - addr; | ||
195 | addr = ALIGN(vma->vm_end, huge_page_size(h)); | ||
196 | } | ||
197 | } | ||
198 | |||
199 | static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, | ||
200 | unsigned long addr0, unsigned long len, | ||
201 | unsigned long pgoff, unsigned long flags) | ||
202 | { | ||
203 | struct hstate *h = hstate_file(file); | ||
204 | struct mm_struct *mm = current->mm; | ||
205 | struct vm_area_struct *vma, *prev_vma; | ||
206 | unsigned long base = mm->mmap_base, addr = addr0; | ||
207 | unsigned long largest_hole = mm->cached_hole_size; | ||
208 | int first_time = 1; | ||
209 | |||
210 | /* don't allow allocations above current base */ | ||
211 | if (mm->free_area_cache > base) | ||
212 | mm->free_area_cache = base; | ||
213 | |||
214 | if (len <= largest_hole) { | ||
215 | largest_hole = 0; | ||
216 | mm->free_area_cache = base; | ||
217 | } | ||
218 | try_again: | ||
219 | /* make sure it can fit in the remaining address space */ | ||
220 | if (mm->free_area_cache < len) | ||
221 | goto fail; | ||
222 | |||
223 | /* either no address requested or can't fit in requested address hole */ | ||
224 | addr = (mm->free_area_cache - len) & huge_page_mask(h); | ||
225 | do { | ||
226 | /* | ||
227 | * Lookup failure means no vma is above this address, | ||
228 | * i.e. return with success: | ||
229 | */ | ||
230 | vma = find_vma_prev(mm, addr, &prev_vma); | ||
231 | if (!vma) | ||
232 | return addr; | ||
235 | |||
236 | /* | ||
237 | * new region fits between prev_vma->vm_end and | ||
238 | * vma->vm_start, use it: | ||
239 | */ | ||
240 | if (addr + len <= vma->vm_start && | ||
241 | (!prev_vma || (addr >= prev_vma->vm_end))) { | ||
242 | /* remember the address as a hint for next time */ | ||
243 | mm->cached_hole_size = largest_hole; | ||
244 | mm->free_area_cache = addr; | ||
245 | return addr; | ||
246 | } else { | ||
247 | /* pull free_area_cache down to the first hole */ | ||
248 | if (mm->free_area_cache == vma->vm_end) { | ||
249 | mm->free_area_cache = vma->vm_start; | ||
250 | mm->cached_hole_size = largest_hole; | ||
251 | } | ||
252 | } | ||
253 | |||
254 | /* remember the largest hole we saw so far */ | ||
255 | if (addr + largest_hole < vma->vm_start) | ||
256 | largest_hole = vma->vm_start - addr; | ||
257 | |||
258 | /* try just below the current vma->vm_start */ | ||
259 | addr = (vma->vm_start - len) & huge_page_mask(h); | ||
260 | |||
261 | } while (len <= vma->vm_start); | ||
262 | |||
263 | fail: | ||
264 | /* | ||
265 | * if hint left us with no space for the requested | ||
266 | * mapping then try again: | ||
267 | */ | ||
268 | if (first_time) { | ||
269 | mm->free_area_cache = base; | ||
270 | largest_hole = 0; | ||
271 | first_time = 0; | ||
272 | goto try_again; | ||
273 | } | ||
274 | /* | ||
275 | * A failed mmap() very likely causes application failure, | ||
276 | * so fall back to the bottom-up function here. This scenario | ||
277 | * can happen with large stack limits and large mmap() | ||
278 | * allocations. | ||
279 | */ | ||
280 | mm->free_area_cache = TASK_UNMAPPED_BASE; | ||
281 | mm->cached_hole_size = ~0UL; | ||
282 | addr = hugetlb_get_unmapped_area_bottomup(file, addr0, | ||
283 | len, pgoff, flags); | ||
284 | |||
285 | /* | ||
286 | * Restore the topdown base: | ||
287 | */ | ||
288 | mm->free_area_cache = base; | ||
289 | mm->cached_hole_size = ~0UL; | ||
290 | |||
291 | return addr; | ||
292 | } | ||
293 | |||
294 | unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | ||
295 | unsigned long len, unsigned long pgoff, unsigned long flags) | ||
296 | { | ||
297 | struct hstate *h = hstate_file(file); | ||
298 | struct mm_struct *mm = current->mm; | ||
299 | struct vm_area_struct *vma; | ||
300 | |||
301 | if (len & ~huge_page_mask(h)) | ||
302 | return -EINVAL; | ||
303 | if (len > TASK_SIZE) | ||
304 | return -ENOMEM; | ||
305 | |||
306 | if (flags & MAP_FIXED) { | ||
307 | if (prepare_hugepage_range(file, addr, len)) | ||
308 | return -EINVAL; | ||
309 | return addr; | ||
310 | } | ||
311 | |||
312 | if (addr) { | ||
313 | addr = ALIGN(addr, huge_page_size(h)); | ||
314 | vma = find_vma(mm, addr); | ||
315 | if (TASK_SIZE - len >= addr && | ||
316 | (!vma || addr + len <= vma->vm_start)) | ||
317 | return addr; | ||
318 | } | ||
319 | if (current->mm->get_unmapped_area == arch_get_unmapped_area) | ||
320 | return hugetlb_get_unmapped_area_bottomup(file, addr, len, | ||
321 | pgoff, flags); | ||
322 | else | ||
323 | return hugetlb_get_unmapped_area_topdown(file, addr, len, | ||
324 | pgoff, flags); | ||
325 | } | ||
326 | |||
327 | static __init int setup_hugepagesz(char *opt) | ||
328 | { | ||
329 | unsigned long ps = memparse(opt, &opt); | ||
330 | if (ps == PMD_SIZE) { | ||
331 | hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); | ||
332 | } else if (ps == PUD_SIZE) { | ||
333 | hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); | ||
334 | } else { | ||
335 | printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n", | ||
336 | ps >> 20); | ||
337 | return 0; | ||
338 | } | ||
339 | return 1; | ||
340 | } | ||
341 | __setup("hugepagesz=", setup_hugepagesz); | ||
342 | |||
343 | #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ | ||
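Once a size is registered via "hugepagesz=" (and pages are reserved with the generic "hugepages=" parameter), userspace can exercise this path. A small hedged example follows; the 16 MB size, the MAP_HUGETLB fallback value, and anonymous-hugetlb support are assumptions, not guarantees of this patch:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* assumed value; older libcs do not define it */
#endif

int main(void)
{
	size_t len = 16UL << 20;	/* assumed huge page size (PMD_SIZE here) */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, len);	/* touch the mapping to fault in huge pages */
	munmap(p, len);
	return 0;
}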
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c new file mode 100644 index 000000000000..125ac53b60fc --- /dev/null +++ b/arch/tile/mm/init.c | |||
@@ -0,0 +1,1082 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1995 Linus Torvalds | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/signal.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <linux/ptrace.h> | ||
24 | #include <linux/mman.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/hugetlb.h> | ||
27 | #include <linux/swap.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/highmem.h> | ||
31 | #include <linux/pagemap.h> | ||
32 | #include <linux/poison.h> | ||
33 | #include <linux/bootmem.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/proc_fs.h> | ||
36 | #include <linux/efi.h> | ||
37 | #include <linux/memory_hotplug.h> | ||
38 | #include <linux/uaccess.h> | ||
39 | #include <asm/mmu_context.h> | ||
40 | #include <asm/processor.h> | ||
41 | #include <asm/system.h> | ||
42 | #include <asm/pgtable.h> | ||
43 | #include <asm/pgalloc.h> | ||
44 | #include <asm/dma.h> | ||
45 | #include <asm/fixmap.h> | ||
46 | #include <asm/tlb.h> | ||
47 | #include <asm/tlbflush.h> | ||
48 | #include <asm/sections.h> | ||
49 | #include <asm/setup.h> | ||
50 | #include <asm/homecache.h> | ||
51 | #include <hv/hypervisor.h> | ||
52 | #include <arch/chip.h> | ||
53 | |||
54 | #include "migrate.h" | ||
55 | |||
56 | /* | ||
57 | * We could set FORCE_MAX_ZONEORDER to "(HPAGE_SHIFT - PAGE_SHIFT + 1)" | ||
58 | * in the Tile Kconfig, but this generates configure warnings. | ||
59 | * Do it here and force people to get it right to compile this file. | ||
60 | * The problem is that with 4KB small pages and 16MB huge pages, | ||
61 | * the default value doesn't allow us to group enough small pages | ||
62 | * together to make up a huge page. | ||
63 | */ | ||
64 | #if CONFIG_FORCE_MAX_ZONEORDER < HPAGE_SHIFT - PAGE_SHIFT + 1 | ||
65 | # error "Change FORCE_MAX_ZONEORDER in arch/tile/Kconfig to match page size" | ||
66 | #endif | ||
67 | |||
68 | #define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0)) | ||
69 | |||
70 | unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE; | ||
71 | |||
72 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
73 | |||
74 | /* Create an L2 page table */ | ||
75 | static pte_t * __init alloc_pte(void) | ||
76 | { | ||
77 | return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0); | ||
78 | } | ||
79 | |||
80 | /* | ||
81 | * L2 page tables per controller. We allocate these all at once from | ||
82 | * the bootmem allocator and store them here. This saves on kernel L2 | ||
83 | * page table memory, compared to allocating a full 64K page per L2 | ||
84 | * page table, and also means that in cases where we use huge pages, | ||
85 | * we are guaranteed to later be able to shatter those huge pages and | ||
86 | * switch to using these page tables instead, without requiring | ||
87 | * further allocation. Each l2_ptes[] entry points to the first page | ||
88 | * table for the first hugepage-size piece of memory on the | ||
89 | * controller; other page tables are just indexed directly, i.e. the | ||
90 | * L2 page tables are contiguous in memory for each controller. | ||
91 | */ | ||
92 | static pte_t *l2_ptes[MAX_NUMNODES]; | ||
93 | static int num_l2_ptes[MAX_NUMNODES]; | ||
94 | |||
95 | static void init_prealloc_ptes(int node, int pages) | ||
96 | { | ||
97 | BUG_ON(pages & (HV_L2_ENTRIES-1)); | ||
98 | if (pages) { | ||
99 | num_l2_ptes[node] = pages; | ||
100 | l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t), | ||
101 | HV_PAGE_TABLE_ALIGN, 0); | ||
102 | } | ||
103 | } | ||
104 | |||
105 | pte_t *get_prealloc_pte(unsigned long pfn) | ||
106 | { | ||
107 | int node = pfn_to_nid(pfn); | ||
108 | pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT)); | ||
109 | BUG_ON(node >= MAX_NUMNODES); | ||
110 | BUG_ON(pfn >= num_l2_ptes[node]); | ||
111 | return &l2_ptes[node][pfn]; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * What caching do we expect pages from the heap to have when | ||
116 | * they are allocated during bootup? (Once we've installed the | ||
117 | * "real" swapper_pg_dir.) | ||
118 | */ | ||
119 | static int initial_heap_home(void) | ||
120 | { | ||
121 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
122 | if (hash_default) | ||
123 | return PAGE_HOME_HASH; | ||
124 | #endif | ||
125 | return smp_processor_id(); | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Place a pointer to an L2 page table in a middle page | ||
130 | * directory entry. | ||
131 | */ | ||
132 | static void __init assign_pte(pmd_t *pmd, pte_t *page_table) | ||
133 | { | ||
134 | phys_addr_t pa = __pa(page_table); | ||
135 | unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN; | ||
136 | pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn); | ||
137 | BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0); | ||
138 | pteval = pte_set_home(pteval, initial_heap_home()); | ||
139 | *(pte_t *)pmd = pteval; | ||
140 | if (page_table != (pte_t *)pmd_page_vaddr(*pmd)) | ||
141 | BUG(); | ||
142 | } | ||
143 | |||
144 | #ifdef __tilegx__ | ||
145 | |||
146 | #if HV_L1_SIZE != HV_L2_SIZE | ||
147 | # error Rework assumption that L1 and L2 page tables are same size. | ||
148 | #endif | ||
149 | |||
150 | /* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */ | ||
151 | static inline pmd_t *alloc_pmd(void) | ||
152 | { | ||
153 | return (pmd_t *)alloc_pte(); | ||
154 | } | ||
155 | |||
156 | static inline void assign_pmd(pud_t *pud, pmd_t *pmd) | ||
157 | { | ||
158 | assign_pte((pmd_t *)pud, (pte_t *)pmd); | ||
159 | } | ||
160 | |||
161 | #endif /* __tilegx__ */ | ||
162 | |||
163 | /* Replace the given pmd with a full PTE table. */ | ||
164 | void __init shatter_pmd(pmd_t *pmd) | ||
165 | { | ||
166 | pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd)); | ||
167 | assign_pte(pmd, pte); | ||
168 | } | ||
169 | |||
170 | #ifdef CONFIG_HIGHMEM | ||
171 | /* | ||
172 | * This function initializes a certain range of kernel virtual memory | ||
173 | * with new bootmem page tables, everywhere page tables are missing in | ||
174 | * the given range. | ||
175 | */ | ||
176 | |||
177 | /* | ||
178 | * NOTE: The page tables are allocated contiguously in physical memory, | ||
179 | * so we can cache the location of the first one and move around without | ||
180 | * checking the pgd every time. | ||
181 | */ | ||
182 | static void __init page_table_range_init(unsigned long start, | ||
183 | unsigned long end, pgd_t *pgd_base) | ||
184 | { | ||
185 | pgd_t *pgd; | ||
186 | int pgd_idx; | ||
187 | unsigned long vaddr; | ||
188 | |||
189 | vaddr = start; | ||
190 | pgd_idx = pgd_index(vaddr); | ||
191 | pgd = pgd_base + pgd_idx; | ||
192 | |||
193 | for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { | ||
194 | pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr); | ||
195 | if (pmd_none(*pmd)) | ||
196 | assign_pte(pmd, alloc_pte()); | ||
197 | vaddr += PMD_SIZE; | ||
198 | } | ||
199 | } | ||
200 | #endif /* CONFIG_HIGHMEM */ | ||
201 | |||
202 | |||
203 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
204 | |||
205 | static int __initdata ktext_hash = 1; /* .text pages */ | ||
206 | static int __initdata kdata_hash = 1; /* .data and .bss pages */ | ||
207 | int __write_once hash_default = 1; /* kernel allocator pages */ | ||
208 | EXPORT_SYMBOL(hash_default); | ||
209 | int __write_once kstack_hash = 1; /* if no homecaching, use h4h */ | ||
210 | #endif /* CHIP_HAS_CBOX_HOME_MAP */ | ||
211 | |||
212 | /* | ||
213 | * CPUs to use for striping the pages of kernel data. If hash-for-home | ||
214 | * is available, this is only relevant if kcache_hash sets up the | ||
215 | * .data and .bss to be page-homed, and we don't want the default mode | ||
216 | * of using the full set of kernel cpus for the striping. | ||
217 | */ | ||
218 | static __initdata struct cpumask kdata_mask; | ||
219 | static __initdata int kdata_arg_seen; | ||
220 | |||
221 | int __write_once kdata_huge; /* if no homecaching, small pages */ | ||
222 | |||
223 | |||
224 | /* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */ | ||
225 | static pgprot_t __init construct_pgprot(pgprot_t prot, int home) | ||
226 | { | ||
227 | prot = pte_set_home(prot, home); | ||
228 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
229 | if (home == PAGE_HOME_IMMUTABLE) { | ||
230 | if (ktext_hash) | ||
231 | prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3); | ||
232 | else | ||
233 | prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3); | ||
234 | } | ||
235 | #endif | ||
236 | return prot; | ||
237 | } | ||
238 | |||
239 | /* | ||
240 | * For a given kernel data VA, how should it be cached? | ||
241 | * We return the complete pgprot_t with caching bits set. | ||
242 | */ | ||
243 | static pgprot_t __init init_pgprot(ulong address) | ||
244 | { | ||
245 | int cpu; | ||
246 | unsigned long page; | ||
247 | enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET }; | ||
248 | |||
249 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
250 | /* For kdata=huge, everything is just hash-for-home. */ | ||
251 | if (kdata_huge) | ||
252 | return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH); | ||
253 | #endif | ||
254 | |||
255 | /* We map the aliased pages of permanent text inaccessible. */ | ||
256 | if (address < (ulong) _sinittext - CODE_DELTA) | ||
257 | return PAGE_NONE; | ||
258 | |||
259 | /* | ||
260 | * We map read-only data non-coherent for performance. We could | ||
261 | * use neighborhood caching on TILE64, but it's not clear it's a win. | ||
262 | */ | ||
263 | if ((address >= (ulong) __start_rodata && | ||
264 | address < (ulong) __end_rodata) || | ||
265 | address == (ulong) empty_zero_page) { | ||
266 | return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE); | ||
267 | } | ||
268 | |||
269 | /* As a performance optimization, keep the boot init stack here. */ | ||
270 | if (address >= (ulong)&init_thread_union && | ||
271 | address < (ulong)&init_thread_union + THREAD_SIZE) | ||
272 | return construct_pgprot(PAGE_KERNEL, smp_processor_id()); | ||
273 | |||
274 | #ifndef __tilegx__ | ||
275 | #if !ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
276 | /* Force the atomic_locks[] array page to be hash-for-home. */ | ||
277 | if (address == (ulong) atomic_locks) | ||
278 | return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH); | ||
279 | #endif | ||
280 | #endif | ||
281 | |||
282 | /* | ||
283 | * Everything else that isn't data or bss is heap, so mark it | ||
284 | * with the initial heap home (hash-for-home, or this cpu). This | ||
285 | * includes any addresses after the loaded image; any address before | ||
286 | * _einittext (since we already captured the case of text before | ||
287 | * _sinittext); and any init-data pages. | ||
288 | * | ||
289 | * All the LOWMEM pages that we mark this way will get their | ||
290 | * struct page homecache properly marked later, in set_page_homes(). | ||
291 | * The HIGHMEM pages we leave with a default zero for their | ||
292 | * homes, but with a zero free_time we don't have to actually | ||
293 | * do a flush action the first time we use them, either. | ||
294 | */ | ||
295 | if (address >= (ulong) _end || address < (ulong) _sdata || | ||
296 | (address >= (ulong) _sinitdata && | ||
297 | address < (ulong) _einitdata)) | ||
298 | return construct_pgprot(PAGE_KERNEL, initial_heap_home()); | ||
299 | |||
300 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
301 | /* Use hash-for-home if requested for data/bss. */ | ||
302 | if (kdata_hash) | ||
303 | return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH); | ||
304 | #endif | ||
305 | |||
306 | /* | ||
307 | * Otherwise we just hand out consecutive cpus. To avoid | ||
308 | * requiring this function to hold state, we just walk forward from | ||
309 | * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach | ||
310 | * the requested address, while walking cpu home around kdata_mask. | ||
311 | * This is typically no more than a dozen or so iterations. | ||
312 | */ | ||
313 | BUG_ON(_einitdata != __bss_start); | ||
314 | for (page = (ulong)_sdata, cpu = NR_CPUS; ; ) { | ||
315 | cpu = cpumask_next(cpu, &kdata_mask); | ||
316 | if (cpu == NR_CPUS) | ||
317 | cpu = cpumask_first(&kdata_mask); | ||
318 | if (page >= address) | ||
319 | break; | ||
320 | page += PAGE_SIZE; | ||
321 | if (page == (ulong)__start_rodata) | ||
322 | page = (ulong)__end_rodata; | ||
323 | if (page == (ulong)&init_thread_union) | ||
324 | page += THREAD_SIZE; | ||
325 | if (page == (ulong)_sinitdata) | ||
326 | page = (ulong)_einitdata; | ||
327 | if (page == (ulong)empty_zero_page) | ||
328 | page += PAGE_SIZE; | ||
329 | #ifndef __tilegx__ | ||
330 | #if !ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
331 | if (page == (ulong)atomic_locks) | ||
332 | page += PAGE_SIZE; | ||
333 | #endif | ||
334 | #endif | ||
335 | |||
336 | } | ||
337 | return construct_pgprot(PAGE_KERNEL, cpu); | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * This function sets up how we cache the kernel text. If we have | ||
342 | * hash-for-home support, normally that is used instead (see the | ||
343 | * kcache_hash boot flag for more information). But if we end up | ||
344 | * using a page-based caching technique, this option sets up the | ||
345 | * details of that. In addition, the "ktext=nocache" option may | ||
346 | * always be used to disable local caching of text pages, if desired. | ||
347 | */ | ||
348 | |||
349 | static int __initdata ktext_arg_seen; | ||
350 | static int __initdata ktext_small; | ||
351 | static int __initdata ktext_local; | ||
352 | static int __initdata ktext_all; | ||
353 | static int __initdata ktext_nondataplane; | ||
354 | static int __initdata ktext_nocache; | ||
355 | static struct cpumask __initdata ktext_mask; | ||
356 | |||
357 | static int __init setup_ktext(char *str) | ||
358 | { | ||
359 | if (str == NULL) | ||
360 | return -EINVAL; | ||
361 | |||
362 | /* If you have a leading "nocache", turn off ktext caching */ | ||
363 | if (strncmp(str, "nocache", 7) == 0) { | ||
364 | ktext_nocache = 1; | ||
365 | printk("ktext: disabling local caching of kernel text\n"); | ||
366 | str += 7; | ||
367 | if (*str == ',') | ||
368 | ++str; | ||
369 | if (*str == '\0') | ||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | ktext_arg_seen = 1; | ||
374 | |||
375 | /* Default setting on Tile64: use a huge page */ | ||
376 | if (strcmp(str, "huge") == 0) | ||
377 | printk("ktext: using one huge locally cached page\n"); | ||
378 | |||
379 | /* Pay TLB cost but get no cache benefit: cache small pages locally */ | ||
380 | else if (strcmp(str, "local") == 0) { | ||
381 | ktext_small = 1; | ||
382 | ktext_local = 1; | ||
383 | printk("ktext: using small pages with local caching\n"); | ||
384 | } | ||
385 | |||
386 | /* Neighborhood cache ktext pages on all cpus. */ | ||
387 | else if (strcmp(str, "all") == 0) { | ||
388 | ktext_small = 1; | ||
389 | ktext_all = 1; | ||
390 | printk("ktext: using maximal caching neighborhood\n"); | ||
391 | } | ||
392 | |||
393 | |||
394 | /* Neighborhood ktext pages on specified mask */ | ||
395 | else if (cpulist_parse(str, &ktext_mask) == 0) { | ||
396 | char buf[NR_CPUS * 5]; | ||
397 | cpulist_scnprintf(buf, sizeof(buf), &ktext_mask); | ||
398 | if (cpumask_weight(&ktext_mask) > 1) { | ||
399 | ktext_small = 1; | ||
400 | printk("ktext: using caching neighborhood %s " | ||
401 | "with small pages\n", buf); | ||
402 | } else { | ||
403 | printk("ktext: caching on cpu %s with one huge page\n", | ||
404 | buf); | ||
405 | } | ||
406 | } | ||
407 | |||
408 | else if (*str) | ||
409 | return -EINVAL; | ||
410 | |||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | early_param("ktext", setup_ktext); | ||
415 | |||
416 | |||
417 | static inline pgprot_t ktext_set_nocache(pgprot_t prot) | ||
418 | { | ||
419 | if (!ktext_nocache) | ||
420 | prot = hv_pte_set_nc(prot); | ||
421 | #if CHIP_HAS_NC_AND_NOALLOC_BITS() | ||
422 | else | ||
423 | prot = hv_pte_set_no_alloc_l2(prot); | ||
424 | #endif | ||
425 | return prot; | ||
426 | } | ||
427 | |||
428 | #ifndef __tilegx__ | ||
429 | static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) | ||
430 | { | ||
431 | return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va); | ||
432 | } | ||
433 | #else | ||
434 | static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va) | ||
435 | { | ||
436 | pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va); | ||
437 | if (pud_none(*pud)) | ||
438 | assign_pmd(pud, alloc_pmd()); | ||
439 | return pmd_offset(pud, va); | ||
440 | } | ||
441 | #endif | ||
442 | |||
443 | /* Temporary page table we use for staging. */ | ||
444 | static pgd_t pgtables[PTRS_PER_PGD] | ||
445 | __attribute__((section(".init.page"))); | ||
446 | |||
447 | /* | ||
448 | * This maps the physical memory to kernel virtual address space, a total | ||
449 | * of max_low_pfn pages, by creating page tables starting from address | ||
450 | * PAGE_OFFSET. | ||
451 | * | ||
452 | * This routine transitions us from using a set of compiled-in large | ||
453 | * pages to using some more precise caching, including removing access | ||
454 | * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_INTRPT), | ||
455 | * marking read-only data as locally cacheable, striping the remaining | ||
456 | * .data and .bss across all the available tiles, and removing access | ||
457 | * to pages above the top of RAM (thus ensuring a page fault from a bad | ||
458 | * virtual address rather than a hypervisor shoot down for accessing | ||
459 | * memory outside the assigned limits). | ||
460 | */ | ||
461 | static void __init kernel_physical_mapping_init(pgd_t *pgd_base) | ||
462 | { | ||
463 | unsigned long address, pfn; | ||
464 | pmd_t *pmd; | ||
465 | pte_t *pte; | ||
466 | int pte_ofs; | ||
467 | const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id()); | ||
468 | struct cpumask kstripe_mask; | ||
469 | int rc, i; | ||
470 | |||
471 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
472 | if (ktext_arg_seen && ktext_hash) { | ||
473 | printk("warning: \"ktext\" boot argument ignored" | ||
474 | " if \"kcache_hash\" sets up text hash-for-home\n"); | ||
475 | ktext_small = 0; | ||
476 | } | ||
477 | |||
478 | if (kdata_arg_seen && kdata_hash) { | ||
479 | printk("warning: \"kdata\" boot argument ignored" | ||
480 | " if \"kcache_hash\" sets up data hash-for-home\n"); | ||
481 | } | ||
482 | |||
483 | if (kdata_huge && !hash_default) { | ||
484 | printk("warning: disabling \"kdata=huge\"; requires" | ||
485 | " kcache_hash=all or =allbutstack\n"); | ||
486 | kdata_huge = 0; | ||
487 | } | ||
488 | #endif | ||
489 | |||
490 | /* | ||
491 | * Set up a mask for cpus to use for kernel striping. | ||
492 | * This is normally all cpus, but minus dataplane cpus if any. | ||
493 | * If the dataplane covers the whole chip, we stripe over | ||
494 | * the whole chip too. | ||
495 | */ | ||
496 | cpumask_copy(&kstripe_mask, cpu_possible_mask); | ||
497 | if (!kdata_arg_seen) | ||
498 | kdata_mask = kstripe_mask; | ||
499 | |||
500 | /* Allocate and fill in L2 page tables */ | ||
501 | for (i = 0; i < MAX_NUMNODES; ++i) { | ||
502 | #ifdef CONFIG_HIGHMEM | ||
503 | unsigned long end_pfn = node_lowmem_end_pfn[i]; | ||
504 | #else | ||
505 | unsigned long end_pfn = node_end_pfn[i]; | ||
506 | #endif | ||
507 | unsigned long end_huge_pfn = 0; | ||
508 | |||
509 | /* Pre-shatter the last huge page to allow per-cpu pages. */ | ||
510 | if (kdata_huge) | ||
511 | end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT); | ||
512 | |||
513 | pfn = node_start_pfn[i]; | ||
514 | |||
515 | /* Allocate enough memory to hold L2 page tables for node. */ | ||
516 | init_prealloc_ptes(i, end_pfn - pfn); | ||
517 | |||
518 | address = (unsigned long) pfn_to_kaddr(pfn); | ||
519 | while (pfn < end_pfn) { | ||
520 | BUG_ON(address & (HPAGE_SIZE-1)); | ||
521 | pmd = get_pmd(pgtables, address); | ||
522 | pte = get_prealloc_pte(pfn); | ||
523 | if (pfn < end_huge_pfn) { | ||
524 | pgprot_t prot = init_pgprot(address); | ||
525 | *(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot)); | ||
526 | for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE; | ||
527 | pfn++, pte_ofs++, address += PAGE_SIZE) | ||
528 | pte[pte_ofs] = pfn_pte(pfn, prot); | ||
529 | } else { | ||
530 | if (kdata_huge) | ||
531 | printk(KERN_DEBUG "pre-shattered huge" | ||
532 | " page at %#lx\n", address); | ||
533 | for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE; | ||
534 | pfn++, pte_ofs++, address += PAGE_SIZE) { | ||
535 | pgprot_t prot = init_pgprot(address); | ||
536 | pte[pte_ofs] = pfn_pte(pfn, prot); | ||
537 | } | ||
538 | assign_pte(pmd, pte); | ||
539 | } | ||
540 | } | ||
541 | } | ||
542 | |||
543 | /* | ||
544 | * Set or check ktext_mask now that we have cpu_possible_mask | ||
545 | * and kstripe_mask to work with. | ||
546 | */ | ||
547 | if (ktext_all) | ||
548 | cpumask_copy(&ktext_mask, cpu_possible_mask); | ||
549 | else if (ktext_nondataplane) | ||
550 | ktext_mask = kstripe_mask; | ||
551 | else if (!cpumask_empty(&ktext_mask)) { | ||
552 | /* Sanity-check any mask that was requested */ | ||
553 | struct cpumask bad; | ||
554 | cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask); | ||
555 | cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask); | ||
556 | if (!cpumask_empty(&bad)) { | ||
557 | char buf[NR_CPUS * 5]; | ||
558 | cpulist_scnprintf(buf, sizeof(buf), &bad); | ||
559 | printk("ktext: not using unavailable cpus %s\n", buf); | ||
560 | } | ||
561 | if (cpumask_empty(&ktext_mask)) { | ||
562 | printk("ktext: no valid cpus; caching on %d.\n", | ||
563 | smp_processor_id()); | ||
564 | cpumask_copy(&ktext_mask, | ||
565 | cpumask_of(smp_processor_id())); | ||
566 | } | ||
567 | } | ||
568 | |||
569 | address = MEM_SV_INTRPT; | ||
570 | pmd = get_pmd(pgtables, address); | ||
571 | if (ktext_small) { | ||
572 | /* Allocate an L2 PTE for the kernel text */ | ||
573 | int cpu = 0; | ||
574 | pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC, | ||
575 | PAGE_HOME_IMMUTABLE); | ||
576 | |||
577 | if (ktext_local) { | ||
578 | if (ktext_nocache) | ||
579 | prot = hv_pte_set_mode(prot, | ||
580 | HV_PTE_MODE_UNCACHED); | ||
581 | else | ||
582 | prot = hv_pte_set_mode(prot, | ||
583 | HV_PTE_MODE_CACHE_NO_L3); | ||
584 | } else { | ||
585 | prot = hv_pte_set_mode(prot, | ||
586 | HV_PTE_MODE_CACHE_TILE_L3); | ||
587 | cpu = cpumask_first(&ktext_mask); | ||
588 | |||
589 | prot = ktext_set_nocache(prot); | ||
590 | } | ||
591 | |||
592 | BUG_ON(address != (unsigned long)_stext); | ||
593 | pfn = 0; /* code starts at PA 0 */ | ||
594 | pte = alloc_pte(); | ||
595 | for (pte_ofs = 0; address < (unsigned long)_einittext; | ||
596 | pfn++, pte_ofs++, address += PAGE_SIZE) { | ||
597 | if (!ktext_local) { | ||
598 | prot = set_remote_cache_cpu(prot, cpu); | ||
599 | cpu = cpumask_next(cpu, &ktext_mask); | ||
600 | if (cpu == NR_CPUS) | ||
601 | cpu = cpumask_first(&ktext_mask); | ||
602 | } | ||
603 | pte[pte_ofs] = pfn_pte(pfn, prot); | ||
604 | } | ||
605 | assign_pte(pmd, pte); | ||
606 | } else { | ||
607 | pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC); | ||
608 | pteval = pte_mkhuge(pteval); | ||
609 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
610 | if (ktext_hash) { | ||
611 | pteval = hv_pte_set_mode(pteval, | ||
612 | HV_PTE_MODE_CACHE_HASH_L3); | ||
613 | pteval = ktext_set_nocache(pteval); | ||
614 | } else | ||
615 | #endif /* CHIP_HAS_CBOX_HOME_MAP() */ | ||
616 | if (cpumask_weight(&ktext_mask) == 1) { | ||
617 | pteval = set_remote_cache_cpu(pteval, | ||
618 | cpumask_first(&ktext_mask)); | ||
619 | pteval = hv_pte_set_mode(pteval, | ||
620 | HV_PTE_MODE_CACHE_TILE_L3); | ||
621 | pteval = ktext_set_nocache(pteval); | ||
622 | } else if (ktext_nocache) | ||
623 | pteval = hv_pte_set_mode(pteval, | ||
624 | HV_PTE_MODE_UNCACHED); | ||
625 | else | ||
626 | pteval = hv_pte_set_mode(pteval, | ||
627 | HV_PTE_MODE_CACHE_NO_L3); | ||
628 | *(pte_t *)pmd = pteval; | ||
629 | } | ||
630 | |||
631 | /* Set swapper_pgprot here so it is flushed to memory right away. */ | ||
632 | swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir); | ||
633 | |||
634 | /* | ||
635 | * Since we may be changing the caching of the stack and page | ||
636 | * table itself, we invoke an assembly helper to do the | ||
637 | * following steps: | ||
638 | * | ||
639 | * - flush the cache so we start with an empty slate | ||
640 | * - install pgtables[] as the real page table | ||
641 | * - flush the TLB so the new page table takes effect | ||
642 | */ | ||
643 | rc = flush_and_install_context(__pa(pgtables), | ||
644 | init_pgprot((unsigned long)pgtables), | ||
645 | __get_cpu_var(current_asid), | ||
646 | cpumask_bits(my_cpu_mask)); | ||
647 | BUG_ON(rc != 0); | ||
648 | |||
649 | /* Copy the page table back to the normal swapper_pg_dir. */ | ||
650 | memcpy(pgd_base, pgtables, sizeof(pgtables)); | ||
651 | __install_page_table(pgd_base, __get_cpu_var(current_asid), | ||
652 | swapper_pgprot); | ||
653 | } | ||
654 | |||
655 | /* | ||
656 | * devmem_is_allowed() checks to see if /dev/mem access to a certain address | ||
657 | * is valid. The argument is a physical page number. | ||
658 | * | ||
659 | * On Tile, the only valid things for which we can just hand out unchecked | ||
660 | * PTEs are the kernel code and data. Anything else might change its | ||
661 | * homing with time, and we wouldn't know to adjust the /dev/mem PTEs. | ||
662 | * Note that init_thread_union is released to heap soon after boot, | ||
663 | * so we include it in the init data. | ||
664 | * | ||
665 | * For TILE-Gx, we might want to consider allowing access to PA | ||
666 | * regions corresponding to PCI space, etc. | ||
667 | */ | ||
668 | int devmem_is_allowed(unsigned long pagenr) | ||
669 | { | ||
670 | return pagenr < kaddr_to_pfn(_end) && | ||
671 | !(pagenr >= kaddr_to_pfn(&init_thread_union) || | ||
672 | pagenr < kaddr_to_pfn(_einitdata)) && | ||
673 | !(pagenr >= kaddr_to_pfn(_sinittext) || | ||
674 | pagenr <= kaddr_to_pfn(_einittext-1)); | ||
675 | } | ||
676 | |||
677 | #ifdef CONFIG_HIGHMEM | ||
678 | static void __init permanent_kmaps_init(pgd_t *pgd_base) | ||
679 | { | ||
680 | pgd_t *pgd; | ||
681 | pud_t *pud; | ||
682 | pmd_t *pmd; | ||
683 | pte_t *pte; | ||
684 | unsigned long vaddr; | ||
685 | |||
686 | vaddr = PKMAP_BASE; | ||
687 | page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); | ||
688 | |||
689 | pgd = swapper_pg_dir + pgd_index(vaddr); | ||
690 | pud = pud_offset(pgd, vaddr); | ||
691 | pmd = pmd_offset(pud, vaddr); | ||
692 | pte = pte_offset_kernel(pmd, vaddr); | ||
693 | pkmap_page_table = pte; | ||
694 | } | ||
695 | #endif /* CONFIG_HIGHMEM */ | ||
696 | |||
697 | |||
698 | static void __init init_free_pfn_range(unsigned long start, unsigned long end) | ||
699 | { | ||
700 | unsigned long pfn; | ||
701 | struct page *page = pfn_to_page(start); | ||
702 | |||
703 | for (pfn = start; pfn < end; ) { | ||
704 | /* Optimize by freeing pages in large batches */ | ||
705 | int order = __ffs(pfn); | ||
706 | int count, i; | ||
707 | struct page *p; | ||
708 | |||
709 | if (order >= MAX_ORDER) | ||
710 | order = MAX_ORDER-1; | ||
711 | count = 1 << order; | ||
712 | while (pfn + count > end) { | ||
713 | count >>= 1; | ||
714 | --order; | ||
715 | } | ||
716 | for (p = page, i = 0; i < count; ++i, ++p) { | ||
717 | __ClearPageReserved(p); | ||
718 | /* | ||
719 | * Hacky direct set to avoid unnecessary | ||
720 | * lock take/release for EVERY page here. | ||
721 | */ | ||
722 | p->_count.counter = 0; | ||
723 | p->_mapcount.counter = -1; | ||
724 | } | ||
725 | init_page_count(page); | ||
726 | __free_pages(page, order); | ||
727 | totalram_pages += count; | ||
728 | |||
729 | page += count; | ||
730 | pfn += count; | ||
731 | } | ||
732 | } | ||
733 | |||
734 | static void __init set_non_bootmem_pages_init(void) | ||
735 | { | ||
736 | struct zone *z; | ||
737 | for_each_zone(z) { | ||
738 | unsigned long start, end; | ||
739 | int nid = z->zone_pgdat->node_id; | ||
740 | |||
741 | start = z->zone_start_pfn; | ||
742 | if (start == 0) | ||
743 | continue; /* bootmem */ | ||
744 | end = start + z->spanned_pages; | ||
745 | if (zone_idx(z) == ZONE_NORMAL) { | ||
746 | BUG_ON(start != node_start_pfn[nid]); | ||
747 | start = node_free_pfn[nid]; | ||
748 | } | ||
749 | #ifdef CONFIG_HIGHMEM | ||
750 | if (zone_idx(z) == ZONE_HIGHMEM) | ||
751 | totalhigh_pages += z->spanned_pages; | ||
752 | #endif | ||
753 | if (kdata_huge) { | ||
754 | unsigned long percpu_pfn = node_percpu_pfn[nid]; | ||
755 | if (start < percpu_pfn && end > percpu_pfn) | ||
756 | end = percpu_pfn; | ||
757 | } | ||
758 | #ifdef CONFIG_PCI | ||
759 | if (start <= pci_reserve_start_pfn && | ||
760 | end > pci_reserve_start_pfn) { | ||
761 | if (end > pci_reserve_end_pfn) | ||
762 | init_free_pfn_range(pci_reserve_end_pfn, end); | ||
763 | end = pci_reserve_start_pfn; | ||
764 | } | ||
765 | #endif | ||
766 | init_free_pfn_range(start, end); | ||
767 | } | ||
768 | } | ||
769 | |||
770 | /* | ||
771 | * paging_init() sets up the page tables - note that all of lowmem is | ||
772 | * already mapped by head.S. | ||
773 | */ | ||
774 | void __init paging_init(void) | ||
775 | { | ||
776 | #ifdef CONFIG_HIGHMEM | ||
777 | unsigned long vaddr, end; | ||
778 | #endif | ||
779 | #ifdef __tilegx__ | ||
780 | pud_t *pud; | ||
781 | #endif | ||
782 | pgd_t *pgd_base = swapper_pg_dir; | ||
783 | |||
784 | kernel_physical_mapping_init(pgd_base); | ||
785 | |||
786 | #ifdef CONFIG_HIGHMEM | ||
787 | /* | ||
788 | * Fixed mappings, only the page table structure has to be | ||
789 | * created - mappings will be set by set_fixmap(): | ||
790 | */ | ||
791 | vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; | ||
792 | end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; | ||
793 | page_table_range_init(vaddr, end, pgd_base); | ||
794 | permanent_kmaps_init(pgd_base); | ||
795 | #endif | ||
796 | |||
797 | #ifdef __tilegx__ | ||
798 | /* | ||
799 | * Since GX allocates just one pmd_t array worth of vmalloc space, | ||
800 | * we go ahead and allocate it statically here, then share it | ||
801 | * globally. As a result we don't have to worry about any task | ||
802 | * changing init_mm once we get up and running, and there's no | ||
803 | * need for e.g. vmalloc_sync_all(). | ||
804 | */ | ||
805 | BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END)); | ||
806 | pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START); | ||
807 | assign_pmd(pud, alloc_pmd()); | ||
808 | #endif | ||
809 | } | ||
810 | |||
811 | |||
812 | /* | ||
813 | * Walk the kernel page tables and derive the page_home() from | ||
814 | * the PTEs, so that set_pte() can properly validate the caching | ||
815 | * of all PTEs it sees. | ||
816 | */ | ||
817 | void __init set_page_homes(void) | ||
818 | { | ||
819 | } | ||
820 | |||
821 | static void __init set_max_mapnr_init(void) | ||
822 | { | ||
823 | #ifdef CONFIG_FLATMEM | ||
824 | max_mapnr = max_low_pfn; | ||
825 | #endif | ||
826 | } | ||
827 | |||
828 | void __init mem_init(void) | ||
829 | { | ||
830 | int codesize, datasize, initsize; | ||
831 | int i; | ||
832 | #ifndef __tilegx__ | ||
833 | void *last; | ||
834 | #endif | ||
835 | |||
836 | #ifdef CONFIG_FLATMEM | ||
837 | if (!mem_map) | ||
838 | BUG(); | ||
839 | #endif | ||
840 | |||
841 | #ifdef CONFIG_HIGHMEM | ||
842 | /* check that fixmap and pkmap do not overlap */ | ||
843 | if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) { | ||
844 | printk(KERN_ERR "fixmap and kmap areas overlap" | ||
845 | " - this will crash\n"); | ||
846 | printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n", | ||
847 | PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), | ||
848 | FIXADDR_START); | ||
849 | BUG(); | ||
850 | } | ||
851 | #endif | ||
852 | |||
853 | set_max_mapnr_init(); | ||
854 | |||
855 | /* this will put all bootmem onto the freelists */ | ||
856 | totalram_pages += free_all_bootmem(); | ||
857 | |||
858 | /* count all remaining LOWMEM and give all HIGHMEM to page allocator */ | ||
859 | set_non_bootmem_pages_init(); | ||
860 | |||
861 | codesize = (unsigned long)&_etext - (unsigned long)&_text; | ||
862 | datasize = (unsigned long)&_end - (unsigned long)&_sdata; | ||
863 | initsize = (unsigned long)&_einittext - (unsigned long)&_sinittext; | ||
864 | initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata; | ||
865 | |||
866 | printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n", | ||
867 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | ||
868 | num_physpages << (PAGE_SHIFT-10), | ||
869 | codesize >> 10, | ||
870 | datasize >> 10, | ||
871 | initsize >> 10, | ||
872 | (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)) | ||
873 | ); | ||
874 | |||
875 | /* | ||
876 | * In debug mode, dump some interesting memory mappings. | ||
877 | */ | ||
878 | #ifdef CONFIG_HIGHMEM | ||
879 | printk(KERN_DEBUG " KMAP %#lx - %#lx\n", | ||
880 | FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1); | ||
881 | printk(KERN_DEBUG " PKMAP %#lx - %#lx\n", | ||
882 | PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1); | ||
883 | #endif | ||
884 | #ifdef CONFIG_HUGEVMAP | ||
885 | printk(KERN_DEBUG " HUGEMAP %#lx - %#lx\n", | ||
886 | HUGE_VMAP_BASE, HUGE_VMAP_END - 1); | ||
887 | #endif | ||
888 | printk(KERN_DEBUG " VMALLOC %#lx - %#lx\n", | ||
889 | _VMALLOC_START, _VMALLOC_END - 1); | ||
890 | #ifdef __tilegx__ | ||
891 | for (i = MAX_NUMNODES-1; i >= 0; --i) { | ||
892 | struct pglist_data *node = &node_data[i]; | ||
893 | if (node->node_present_pages) { | ||
894 | unsigned long start = (unsigned long) | ||
895 | pfn_to_kaddr(node->node_start_pfn); | ||
896 | unsigned long end = start + | ||
897 | (node->node_present_pages << PAGE_SHIFT); | ||
898 | printk(KERN_DEBUG " MEM%d %#lx - %#lx\n", | ||
899 | i, start, end - 1); | ||
900 | } | ||
901 | } | ||
902 | #else | ||
903 | last = high_memory; | ||
904 | for (i = MAX_NUMNODES-1; i >= 0; --i) { | ||
905 | if ((unsigned long)vbase_map[i] != -1UL) { | ||
906 | printk(KERN_DEBUG " LOWMEM%d %#lx - %#lx\n", | ||
907 | i, (unsigned long) (vbase_map[i]), | ||
908 | (unsigned long) (last-1)); | ||
909 | last = vbase_map[i]; | ||
910 | } | ||
911 | } | ||
912 | #endif | ||
913 | |||
914 | #ifndef __tilegx__ | ||
915 | /* | ||
916 | * Convert from using one lock for all atomic operations to | ||
917 | * one per cpu. | ||
918 | */ | ||
919 | __init_atomic_per_cpu(); | ||
920 | #endif | ||
921 | } | ||
922 | |||
923 | /* | ||
924 | * This is for the non-NUMA, single-node SMP system case. | ||
925 | * As on x86, we always add memory to highmem for now. | ||
927 | */ | ||
928 | #ifndef CONFIG_NEED_MULTIPLE_NODES | ||
929 | int arch_add_memory(u64 start, u64 size) | ||
930 | { | ||
931 | struct pglist_data *pgdata = &contig_page_data; | ||
932 | struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1; | ||
933 | unsigned long start_pfn = start >> PAGE_SHIFT; | ||
934 | unsigned long nr_pages = size >> PAGE_SHIFT; | ||
935 | |||
936 | return __add_pages(zone, start_pfn, nr_pages); | ||
937 | } | ||
938 | |||
939 | int remove_memory(u64 start, u64 size) | ||
940 | { | ||
941 | return -EINVAL; | ||
942 | } | ||
943 | #endif | ||
944 | |||
945 | struct kmem_cache *pgd_cache; | ||
946 | |||
947 | void __init pgtable_cache_init(void) | ||
948 | { | ||
949 | pgd_cache = kmem_cache_create("pgd", | ||
950 | PTRS_PER_PGD*sizeof(pgd_t), | ||
951 | PTRS_PER_PGD*sizeof(pgd_t), | ||
952 | 0, | ||
953 | NULL); | ||
954 | if (!pgd_cache) | ||
955 | panic("pgtable_cache_init(): Cannot create pgd cache"); | ||
956 | } | ||
957 | |||
958 | #if !CHIP_HAS_COHERENT_LOCAL_CACHE() | ||
959 | /* | ||
960 | * The __w1data area holds data that is only written during initialization, | ||
961 | * and is read-only and thus freely cacheable thereafter. Fix the page | ||
962 | * table entries that cover that region accordingly. | ||
963 | */ | ||
964 | static void mark_w1data_ro(void) | ||
965 | { | ||
966 | /* Loop over page table entries */ | ||
967 | unsigned long addr = (unsigned long)__w1data_begin; | ||
968 | BUG_ON((addr & (PAGE_SIZE-1)) != 0); | ||
969 | for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) { | ||
970 | unsigned long pfn = kaddr_to_pfn((void *)addr); | ||
971 | struct page *page = pfn_to_page(pfn); | ||
972 | pte_t *ptep = virt_to_pte(NULL, addr); | ||
973 | BUG_ON(pte_huge(*ptep)); /* not relevant for kdata_huge */ | ||
974 | set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO)); | ||
975 | } | ||
976 | } | ||
977 | #endif | ||
978 | |||
979 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
980 | static long __write_once initfree; | ||
981 | #else | ||
982 | static long __write_once initfree = 1; | ||
983 | #endif | ||
984 | |||
985 | /* Select whether to free (1) or mark unusable (0) the __init pages. */ | ||
986 | static int __init set_initfree(char *str) | ||
987 | { | ||
988 | strict_strtol(str, 0, &initfree); | ||
989 | printk("initfree: %s free init pages\n", initfree ? "will" : "won't"); | ||
990 | return 1; | ||
991 | } | ||
992 | __setup("initfree=", set_initfree); | ||
993 | |||
994 | static void free_init_pages(char *what, unsigned long begin, unsigned long end) | ||
995 | { | ||
996 | unsigned long addr = (unsigned long) begin; | ||
997 | |||
998 | if (kdata_huge && !initfree) { | ||
999 | printk("Warning: ignoring initfree=0:" | ||
1000 | " incompatible with kdata=huge\n"); | ||
1001 | initfree = 1; | ||
1002 | } | ||
1003 | end = (end + PAGE_SIZE - 1) & PAGE_MASK; | ||
1004 | local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin); | ||
1005 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | ||
1006 | /* | ||
1007 | * Note we just reset the home here directly in the | ||
1008 | * page table. We know this is safe because our caller | ||
1009 | * just flushed the caches on all the other cpus, | ||
1010 | * and they won't be touching any of these pages. | ||
1011 | */ | ||
1012 | int pfn = kaddr_to_pfn((void *)addr); | ||
1013 | struct page *page = pfn_to_page(pfn); | ||
1014 | pte_t *ptep = virt_to_pte(NULL, addr); | ||
1015 | if (!initfree) { | ||
1016 | /* | ||
1017 | * If debugging page accesses then do not free | ||
1018 | * this memory but mark them not present - any | ||
1019 | * buggy init-section access will create a | ||
1020 | * kernel page fault: | ||
1021 | */ | ||
1022 | pte_clear(&init_mm, addr, ptep); | ||
1023 | continue; | ||
1024 | } | ||
1025 | __ClearPageReserved(page); | ||
1026 | init_page_count(page); | ||
1027 | if (pte_huge(*ptep)) | ||
1028 | BUG_ON(!kdata_huge); | ||
1029 | else | ||
1030 | set_pte_at(&init_mm, addr, ptep, | ||
1031 | pfn_pte(pfn, PAGE_KERNEL)); | ||
1032 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); | ||
1033 | free_page(addr); | ||
1034 | totalram_pages++; | ||
1035 | } | ||
1036 | printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); | ||
1037 | } | ||
1038 | |||
1039 | void free_initmem(void) | ||
1040 | { | ||
1041 | const unsigned long text_delta = MEM_SV_INTRPT - PAGE_OFFSET; | ||
1042 | |||
1043 | /* | ||
1044 | * Evict the dirty initdata on the boot cpu, evict the w1data | ||
1045 | * wherever it's homed, and evict all the init code everywhere. | ||
1046 | * We are guaranteed that no one will touch the init pages any | ||
1047 | * more, and although other cpus may be touching the w1data, | ||
1048 | * we only actually change the caching on tile64, which won't | ||
1049 | * be keeping local copies in the other tiles' caches anyway. | ||
1050 | */ | ||
1051 | homecache_evict(&cpu_cacheable_map); | ||
1052 | |||
1053 | /* Free the data pages that we won't use again after init. */ | ||
1054 | free_init_pages("unused kernel data", | ||
1055 | (unsigned long)_sinitdata, | ||
1056 | (unsigned long)_einitdata); | ||
1057 | |||
1058 | /* | ||
1059 | * Free the pages mapped from 0xc0000000 that correspond to code | ||
1060 | * pages from 0xfd000000 that we won't use again after init. | ||
1061 | */ | ||
1062 | free_init_pages("unused kernel text", | ||
1063 | (unsigned long)_sinittext - text_delta, | ||
1064 | (unsigned long)_einittext - text_delta); | ||
1065 | |||
1066 | #if !CHIP_HAS_COHERENT_LOCAL_CACHE() | ||
1067 | /* | ||
1068 | * Upgrade the .w1data section to globally cached. | ||
1069 | * We don't do this on tilepro, since the cache architecture | ||
1070 | * pretty much makes it irrelevant, and in any case we end | ||
1071 | * up having racing issues with other tiles that may touch | ||
1072 | * the data after we flush the cache but before we update | ||
1073 | * the PTEs and flush the TLBs, causing sharer shootdowns | ||
1074 | * later. Even though this is to clean data, it seems like | ||
1075 | * an unnecessary complication. | ||
1076 | */ | ||
1077 | mark_w1data_ro(); | ||
1078 | #endif | ||
1079 | |||
1080 | /* Do a global TLB flush so everyone sees the changes. */ | ||
1081 | flush_tlb_all(); | ||
1082 | } | ||
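A note on the text_delta arithmetic in free_initmem() above: init code runs from the high text window (0xfd000000, per the comment), but the pages are freed through their low-memory aliases at PAGE_OFFSET (0xc0000000), so the _sinittext/_einittext symbols are shifted down by text_delta before being handed to free_init_pages(). A minimal userspace sketch of that arithmetic follows; the two window bases come from the comment above, while the concrete symbol value is made up for illustration.

/*
 * Illustration only, not part of the patch.  The window bases are the
 * 0xc0000000/0xfd000000 values cited in the comment; the _sinittext
 * value is hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_offset   = 0xc0000000UL;  /* kernel data window */
	unsigned long mem_sv_intrpt = 0xfd000000UL;  /* kernel text window */
	unsigned long text_delta = mem_sv_intrpt - page_offset; /* 0x3d000000 */
	unsigned long sinittext = 0xfd200000UL;      /* hypothetical symbol */

	/* free_init_pages() is given the low alias of the init text: */
	printf("free init text starting at %#lx\n", sinittext - text_delta);
	return 0;
}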
diff --git a/arch/tile/mm/migrate.h b/arch/tile/mm/migrate.h new file mode 100644 index 000000000000..cd45a0837fa6 --- /dev/null +++ b/arch/tile/mm/migrate.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Structure definitions for migration, exposed here for use by | ||
15 | * arch/tile/kernel/asm-offsets.c. | ||
16 | */ | ||
17 | |||
18 | #ifndef MM_MIGRATE_H | ||
19 | #define MM_MIGRATE_H | ||
20 | |||
21 | #include <linux/cpumask.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | |||
24 | /* | ||
25 | * This function is used as a helper when setting up the initial | ||
26 | * page table (swapper_pg_dir). | ||
27 | */ | ||
28 | extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access, | ||
29 | HV_ASID asid, | ||
30 | const unsigned long *cpumask); | ||
31 | |||
32 | /* | ||
33 | * This function supports migration as a "helper" as follows: | ||
34 | * | ||
35 | * - Set the stack PTE itself to "migrating". | ||
36 | * - Do a global TLB flush for (va,length) and the specified ASIDs. | ||
37 | * - Do a cache-evict on all necessary cpus. | ||
38 | * - Write the new stack PTE. | ||
39 | * | ||
40 | * Note that any non-NULL pointers must not point to the page that | ||
41 | * is handled by the stack_pte itself. | ||
42 | */ | ||
43 | extern int homecache_migrate_stack_and_flush(pte_t stack_pte, unsigned long va, | ||
44 | size_t length, pte_t *stack_ptep, | ||
45 | const struct cpumask *cache_cpumask, | ||
46 | const struct cpumask *tlb_cpumask, | ||
47 | HV_Remote_ASID *asids, | ||
48 | int asidcount); | ||
49 | |||
50 | #endif /* MM_MIGRATE_H */ | ||
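The comment above lists the four steps homecache_migrate_stack_and_flush() performs. As a readability aid only, here is that sequence restated as plain C with stubbed-out primitives; none of the stub names or the "migrating" bit position below are real kernel or hypervisor interfaces, and the actual implementation lives in migrate_32.S and homecache.c.

/*
 * Sketch only: the four documented steps, with stubbed primitives.
 * The "migrating" bit position and every stub below are placeholders,
 * not real kernel or hypervisor interfaces.
 */
typedef unsigned long long sketch_pte_t;

static void mark_pte_migrating(sketch_pte_t *ptep) { *ptep |= 1ULL << 63; }
static void global_tlb_flush(unsigned long va, unsigned long len) { (void)va; (void)len; }
static void evict_caches_on_needed_cpus(void) { }

static void migrate_stack_sketch(sketch_pte_t new_stack_pte, unsigned long va,
				 unsigned long len, sketch_pte_t *stack_ptep)
{
	mark_pte_migrating(stack_ptep);      /* 1: stack PTE -> "migrating"  */
	global_tlb_flush(va, len);           /* 2: flush (va, length) TLBs   */
	evict_caches_on_needed_cpus();       /* 3: cache-evict where needed  */
	*stack_ptep = new_stack_pte;         /* 4: write the new stack PTE   */
}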
diff --git a/arch/tile/mm/migrate_32.S b/arch/tile/mm/migrate_32.S new file mode 100644 index 000000000000..f738765cd1e6 --- /dev/null +++ b/arch/tile/mm/migrate_32.S | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This routine is a helper for migrating the home of a set of pages to | ||
15 | * a new cpu. See the documentation in homecache.c for more information. | ||
16 | */ | ||
17 | |||
18 | #include <linux/linkage.h> | ||
19 | #include <linux/threads.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/types.h> | ||
22 | #include <asm/asm-offsets.h> | ||
23 | #include <hv/hypervisor.h> | ||
24 | |||
25 | .text | ||
26 | |||
27 | /* | ||
28 | * First, some definitions that apply to all the code in the file. | ||
29 | */ | ||
30 | |||
31 | /* Locals (caller-save) */ | ||
32 | #define r_tmp r10 | ||
33 | #define r_save_sp r11 | ||
34 | |||
35 | /* What we save where in the stack frame; must include all callee-saves. */ | ||
36 | #define FRAME_SP 4 | ||
37 | #define FRAME_R30 8 | ||
38 | #define FRAME_R31 12 | ||
39 | #define FRAME_R32 16 | ||
40 | #define FRAME_R33 20 | ||
41 | #define FRAME_R34 24 | ||
42 | #define FRAME_R35 28 | ||
43 | #define FRAME_SIZE 32 | ||
44 | |||
45 | |||
46 | |||
47 | |||
48 | /* | ||
49 | * On entry: | ||
50 | * | ||
51 | * r0 low word of the new context PA to install (moved to r_context_lo) | ||
52 | * r1 high word of the new context PA to install (moved to r_context_hi) | ||
53 | * r2 low word of PTE to use for context access (moved to r_access_lo) | ||
54 | * r3 high word of PTE to use for context access (moved to r_access_hi) | ||
55 | * r4 ASID to use for new context (moved to r_asid) | ||
56 | * r5 pointer to cpumask with just this cpu set in it (r_my_cpumask) | ||
57 | */ | ||
58 | |||
59 | /* Arguments (caller-save) */ | ||
60 | #define r_context_lo_in r0 | ||
61 | #define r_context_hi_in r1 | ||
62 | #define r_access_lo_in r2 | ||
63 | #define r_access_hi_in r3 | ||
64 | #define r_asid_in r4 | ||
65 | #define r_my_cpumask r5 | ||
66 | |||
67 | /* Locals (callee-save); must not be more than FRAME_xxx above. */ | ||
68 | #define r_save_ics r30 | ||
69 | #define r_context_lo r31 | ||
70 | #define r_context_hi r32 | ||
71 | #define r_access_lo r33 | ||
72 | #define r_access_hi r34 | ||
73 | #define r_asid r35 | ||
74 | |||
75 | STD_ENTRY(flush_and_install_context) | ||
76 | /* | ||
77 | * Create a stack frame; we can't touch it from the time we flush the | ||
78 | * cache until we install the new page table and flush the TLB. | ||
79 | */ | ||
80 | { | ||
81 | move r_save_sp, sp | ||
82 | sw sp, lr | ||
83 | addi sp, sp, -FRAME_SIZE | ||
84 | } | ||
85 | addi r_tmp, sp, FRAME_SP | ||
86 | { | ||
87 | sw r_tmp, r_save_sp | ||
88 | addi r_tmp, sp, FRAME_R30 | ||
89 | } | ||
90 | { | ||
91 | sw r_tmp, r30 | ||
92 | addi r_tmp, sp, FRAME_R31 | ||
93 | } | ||
94 | { | ||
95 | sw r_tmp, r31 | ||
96 | addi r_tmp, sp, FRAME_R32 | ||
97 | } | ||
98 | { | ||
99 | sw r_tmp, r32 | ||
100 | addi r_tmp, sp, FRAME_R33 | ||
101 | } | ||
102 | { | ||
103 | sw r_tmp, r33 | ||
104 | addi r_tmp, sp, FRAME_R34 | ||
105 | } | ||
106 | { | ||
107 | sw r_tmp, r34 | ||
108 | addi r_tmp, sp, FRAME_R35 | ||
109 | } | ||
110 | sw r_tmp, r35 | ||
111 | |||
112 | /* Move some arguments to callee-save registers. */ | ||
113 | { | ||
114 | move r_context_lo, r_context_lo_in | ||
115 | move r_context_hi, r_context_hi_in | ||
116 | } | ||
117 | { | ||
118 | move r_access_lo, r_access_lo_in | ||
119 | move r_access_hi, r_access_hi_in | ||
120 | } | ||
121 | move r_asid, r_asid_in | ||
122 | |||
123 | /* Disable interrupts, since we can't use our stack. */ | ||
124 | { | ||
125 | mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION | ||
126 | movei r_tmp, 1 | ||
127 | } | ||
128 | mtspr INTERRUPT_CRITICAL_SECTION, r_tmp | ||
129 | |||
130 | /* First, flush our L2 cache. */ | ||
131 | { | ||
132 | move r0, zero /* cache_pa */ | ||
133 | move r1, zero | ||
134 | } | ||
135 | { | ||
136 | auli r2, zero, ha16(HV_FLUSH_EVICT_L2) /* cache_control */ | ||
137 | move r3, r_my_cpumask /* cache_cpumask */ | ||
138 | } | ||
139 | { | ||
140 | move r4, zero /* tlb_va */ | ||
141 | move r5, zero /* tlb_length */ | ||
142 | } | ||
143 | { | ||
144 | move r6, zero /* tlb_pgsize */ | ||
145 | move r7, zero /* tlb_cpumask */ | ||
146 | } | ||
147 | { | ||
148 | move r8, zero /* asids */ | ||
149 | move r9, zero /* asidcount */ | ||
150 | } | ||
151 | jal hv_flush_remote | ||
152 | bnz r0, .Ldone | ||
153 | |||
154 | /* Now install the new page table. */ | ||
155 | { | ||
156 | move r0, r_context_lo | ||
157 | move r1, r_context_hi | ||
158 | } | ||
159 | { | ||
160 | move r2, r_access_lo | ||
161 | move r3, r_access_hi | ||
162 | } | ||
163 | { | ||
164 | move r4, r_asid | ||
165 | movei r5, HV_CTX_DIRECTIO | ||
166 | } | ||
167 | jal hv_install_context | ||
168 | bnz r0, .Ldone | ||
169 | |||
170 | /* Finally, flush the TLB. */ | ||
171 | { | ||
172 | movei r0, 0 /* preserve_global */ | ||
173 | jal hv_flush_all | ||
174 | } | ||
175 | |||
176 | .Ldone: | ||
177 | /* Reset interrupts back to how they were before. */ | ||
178 | mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics | ||
179 | |||
180 | /* Restore the callee-saved registers and return. */ | ||
181 | addli lr, sp, FRAME_SIZE | ||
182 | { | ||
183 | lw lr, lr | ||
184 | addli r_tmp, sp, FRAME_R30 | ||
185 | } | ||
186 | { | ||
187 | lw r30, r_tmp | ||
188 | addli r_tmp, sp, FRAME_R31 | ||
189 | } | ||
190 | { | ||
191 | lw r31, r_tmp | ||
192 | addli r_tmp, sp, FRAME_R32 | ||
193 | } | ||
194 | { | ||
195 | lw r32, r_tmp | ||
196 | addli r_tmp, sp, FRAME_R33 | ||
197 | } | ||
198 | { | ||
199 | lw r33, r_tmp | ||
200 | addli r_tmp, sp, FRAME_R34 | ||
201 | } | ||
202 | { | ||
203 | lw r34, r_tmp | ||
204 | addli r_tmp, sp, FRAME_R35 | ||
205 | } | ||
206 | { | ||
207 | lw r35, r_tmp | ||
208 | addi sp, sp, FRAME_SIZE | ||
209 | } | ||
210 | jrp lr | ||
211 | STD_ENDPROC(flush_and_install_context) | ||
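For readers less comfortable with TILEPro assembly, the routine above boils down to three hypervisor calls made inside an interrupt critical section and without touching the stack. The sketch below restates that flow in C; the hv_* prototypes and both constants are local placeholders (the real ABI passes the 64-bit page-table PA and access PTE as lo/hi register pairs, which is why the assembly uses r0/r1 and r2/r3).

/*
 * Readability sketch; the hv_* stubs and both constants are local
 * placeholders with made-up values and prototypes.
 */
#define SKETCH_HV_FLUSH_EVICT_L2 (1UL << 17)  /* placeholder value */
#define SKETCH_HV_CTX_DIRECTIO   1            /* placeholder value */

static int sketch_hv_flush_remote(unsigned long long cache_pa,
				  unsigned long cache_control,
				  const unsigned long *cache_cpumask,
				  unsigned long tlb_va, unsigned long tlb_length,
				  unsigned long tlb_pgsize,
				  const unsigned long *tlb_cpumask,
				  void *asids, int asidcount) { return 0; }
static int sketch_hv_install_context(unsigned long long page_table,
				     unsigned long long access,
				     unsigned int asid, unsigned int flags) { return 0; }
static int sketch_hv_flush_all(int preserve_global) { return 0; }

static int flush_and_install_context_sketch(unsigned long long page_table,
					    unsigned long long access,
					    unsigned int asid,
					    const unsigned long *my_cpumask)
{
	int rc;

	/* (The assembly first raises INTERRUPT_CRITICAL_SECTION and then
	 * avoids the stack until the new page table is live.) */

	/* 1: evict this cpu's L2 cache; no TLB or ASID work yet. */
	rc = sketch_hv_flush_remote(0, SKETCH_HV_FLUSH_EVICT_L2, my_cpumask,
				    0, 0, 0, 0, 0, 0);
	if (rc)
		return rc;

	/* 2: install the new page table for this ASID. */
	rc = sketch_hv_install_context(page_table, access, asid,
				       SKETCH_HV_CTX_DIRECTIO);
	if (rc)
		return rc;

	/* 3: flush the whole TLB, not preserving global entries. */
	return sketch_hv_flush_all(0 /* preserve_global */);
}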
diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c new file mode 100644 index 000000000000..f96f4cec602a --- /dev/null +++ b/arch/tile/mm/mmap.c | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Taken from the i386 architecture and simplified. | ||
15 | */ | ||
16 | |||
17 | #include <linux/mm.h> | ||
18 | #include <linux/random.h> | ||
19 | #include <linux/limits.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/mman.h> | ||
22 | #include <linux/compat.h> | ||
23 | |||
24 | /* | ||
25 | * Top of mmap area (just below the process stack). | ||
26 | * | ||
27 | * Leave at least a ~128 MB hole. | ||
28 | */ | ||
29 | #define MIN_GAP (128*1024*1024) | ||
30 | #define MAX_GAP (TASK_SIZE/6*5) | ||
31 | |||
32 | static inline unsigned long mmap_base(struct mm_struct *mm) | ||
33 | { | ||
34 | unsigned long gap = rlimit(RLIMIT_STACK); | ||
35 | unsigned long random_factor = 0; | ||
36 | |||
37 | if (current->flags & PF_RANDOMIZE) | ||
38 | random_factor = get_random_int() % (1024*1024); | ||
39 | |||
40 | if (gap < MIN_GAP) | ||
41 | gap = MIN_GAP; | ||
42 | else if (gap > MAX_GAP) | ||
43 | gap = MAX_GAP; | ||
44 | |||
45 | return PAGE_ALIGN(TASK_SIZE - gap - random_factor); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * This function, called very early during the creation of a new | ||
50 | * process VM image, sets up which VM layout function to use: | ||
51 | */ | ||
52 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
53 | { | ||
54 | #if !defined(__tilegx__) | ||
55 | int is_32bit = 1; | ||
56 | #elif defined(CONFIG_COMPAT) | ||
57 | int is_32bit = is_compat_task(); | ||
58 | #else | ||
59 | int is_32bit = 0; | ||
60 | #endif | ||
61 | |||
62 | /* | ||
63 | * Use standard layout if the expected stack growth is unlimited | ||
64 | * or we are running native 64-bit. | ||
65 | */ | ||
66 | if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) { | ||
67 | mm->mmap_base = TASK_UNMAPPED_BASE; | ||
68 | mm->get_unmapped_area = arch_get_unmapped_area; | ||
69 | mm->unmap_area = arch_unmap_area; | ||
70 | } else { | ||
71 | mm->mmap_base = mmap_base(mm); | ||
72 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | ||
73 | mm->unmap_area = arch_unmap_area_topdown; | ||
74 | } | ||
75 | } | ||
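To make the clamp in mmap_base() concrete, here is a small userspace example with an assumed TASK_SIZE and a typical 8 MB stack rlimit: the gap is raised to MIN_GAP, so the top-down mmap area starts about 128 MB below TASK_SIZE (minus up to ~1 MB of randomization when PF_RANDOMIZE is set).

/*
 * Worked example, illustration only.  TASK_SIZE is assumed to be
 * 0xfd000000 purely for the arithmetic; the real value is set by the
 * architecture headers.
 */
#include <stdio.h>

int main(void)
{
	unsigned long task_size = 0xfd000000UL;        /* assumed */
	unsigned long min_gap = 128UL * 1024 * 1024;   /* MIN_GAP */
	unsigned long max_gap = task_size / 6 * 5;     /* MAX_GAP */
	unsigned long gap = 8UL * 1024 * 1024;         /* typical stack rlimit */

	if (gap < min_gap)
		gap = min_gap;          /* 8 MB is raised to 128 MB */
	else if (gap > max_gap)
		gap = max_gap;

	/* Randomization (up to ~1 MB) and PAGE_ALIGN omitted here. */
	printf("mmap_base = %#lx\n", task_size - gap);
	return 0;
}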
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c new file mode 100644 index 000000000000..289e729bbd76 --- /dev/null +++ b/arch/tile/mm/pgtable.c | |||
@@ -0,0 +1,566 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/sched.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/mm.h> | ||
19 | #include <linux/swap.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <linux/highmem.h> | ||
22 | #include <linux/slab.h> | ||
23 | #include <linux/pagemap.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/cpumask.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/vmalloc.h> | ||
29 | #include <linux/smp.h> | ||
30 | |||
31 | #include <asm/system.h> | ||
32 | #include <asm/pgtable.h> | ||
33 | #include <asm/pgalloc.h> | ||
34 | #include <asm/fixmap.h> | ||
35 | #include <asm/tlb.h> | ||
36 | #include <asm/tlbflush.h> | ||
37 | #include <asm/homecache.h> | ||
38 | |||
39 | #define K(x) ((x) << (PAGE_SHIFT-10)) | ||
40 | |||
41 | /* | ||
42 | * The normal show_free_areas() is too verbose on Tile, with dozens | ||
43 | * of processors and often four NUMA zones each with high and lowmem. | ||
44 | */ | ||
45 | void show_mem(void) | ||
46 | { | ||
47 | struct zone *zone; | ||
48 | |||
49 | printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu" | ||
50 | " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu" | ||
51 | " pagecache:%lu swap:%lu\n", | ||
52 | (global_page_state(NR_ACTIVE_ANON) + | ||
53 | global_page_state(NR_ACTIVE_FILE)), | ||
54 | (global_page_state(NR_INACTIVE_ANON) + | ||
55 | global_page_state(NR_INACTIVE_FILE)), | ||
56 | global_page_state(NR_FILE_DIRTY), | ||
57 | global_page_state(NR_WRITEBACK), | ||
58 | global_page_state(NR_UNSTABLE_NFS), | ||
59 | global_page_state(NR_FREE_PAGES), | ||
60 | (global_page_state(NR_SLAB_RECLAIMABLE) + | ||
61 | global_page_state(NR_SLAB_UNRECLAIMABLE)), | ||
62 | global_page_state(NR_FILE_MAPPED), | ||
63 | global_page_state(NR_PAGETABLE), | ||
64 | global_page_state(NR_BOUNCE), | ||
65 | global_page_state(NR_FILE_PAGES), | ||
66 | nr_swap_pages); | ||
67 | |||
68 | for_each_zone(zone) { | ||
69 | unsigned long flags, order, total = 0, largest_order = -1; | ||
70 | |||
71 | if (!populated_zone(zone)) | ||
72 | continue; | ||
73 | |||
74 | printk("Node %d %7s: ", zone_to_nid(zone), zone->name); | ||
75 | spin_lock_irqsave(&zone->lock, flags); | ||
76 | for (order = 0; order < MAX_ORDER; order++) { | ||
77 | int nr = zone->free_area[order].nr_free; | ||
78 | total += nr << order; | ||
79 | if (nr) | ||
80 | largest_order = order; | ||
81 | } | ||
82 | spin_unlock_irqrestore(&zone->lock, flags); | ||
83 | printk("%lukB (largest %lukB)\n", | ||
84 | K(total), largest_order != (unsigned long) -1 ? K(1UL) << largest_order : 0); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * Associate a virtual page frame with a given physical page frame | ||
90 | * and protection flags for that frame. | ||
91 | */ | ||
92 | static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) | ||
93 | { | ||
94 | pgd_t *pgd; | ||
95 | pud_t *pud; | ||
96 | pmd_t *pmd; | ||
97 | pte_t *pte; | ||
98 | |||
99 | pgd = swapper_pg_dir + pgd_index(vaddr); | ||
100 | if (pgd_none(*pgd)) { | ||
101 | BUG(); | ||
102 | return; | ||
103 | } | ||
104 | pud = pud_offset(pgd, vaddr); | ||
105 | if (pud_none(*pud)) { | ||
106 | BUG(); | ||
107 | return; | ||
108 | } | ||
109 | pmd = pmd_offset(pud, vaddr); | ||
110 | if (pmd_none(*pmd)) { | ||
111 | BUG(); | ||
112 | return; | ||
113 | } | ||
114 | pte = pte_offset_kernel(pmd, vaddr); | ||
115 | /* <pfn,flags> stored as-is, to permit clearing entries */ | ||
116 | set_pte(pte, pfn_pte(pfn, flags)); | ||
117 | |||
118 | /* | ||
119 | * It's enough to flush this one mapping. | ||
120 | * This appears conservative since it is only called | ||
121 | * from __set_fixmap. | ||
122 | */ | ||
123 | local_flush_tlb_page(NULL, vaddr, PAGE_SIZE); | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Associate a huge virtual page frame with a given physical page frame | ||
128 | * and protection flags for that frame. pfn is for the base of the page, | ||
129 | * vaddr is what the page gets mapped to - both must be properly aligned. | ||
130 | * The pmd must already be instantiated. | ||
131 | */ | ||
132 | void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) | ||
133 | { | ||
134 | pgd_t *pgd; | ||
135 | pud_t *pud; | ||
136 | pmd_t *pmd; | ||
137 | |||
138 | if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */ | ||
139 | printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n"); | ||
140 | return; /* BUG(); */ | ||
141 | } | ||
142 | if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */ | ||
143 | printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n"); | ||
144 | return; /* BUG(); */ | ||
145 | } | ||
146 | pgd = swapper_pg_dir + pgd_index(vaddr); | ||
147 | if (pgd_none(*pgd)) { | ||
148 | printk(KERN_WARNING "set_pmd_pfn: pgd_none\n"); | ||
149 | return; /* BUG(); */ | ||
150 | } | ||
151 | pud = pud_offset(pgd, vaddr); | ||
152 | pmd = pmd_offset(pud, vaddr); | ||
153 | set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(pfn), flags)); | ||
154 | /* | ||
155 | * It's enough to flush this one mapping. | ||
156 | * We flush both the small- and huge-page TLB entries to be sure. | ||
157 | */ | ||
158 | local_flush_tlb_page(NULL, vaddr, HPAGE_SIZE); | ||
159 | local_flush_tlb_pages(NULL, vaddr, PAGE_SIZE, HPAGE_SIZE); | ||
160 | } | ||
161 | |||
162 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags) | ||
163 | { | ||
164 | unsigned long address = __fix_to_virt(idx); | ||
165 | |||
166 | if (idx >= __end_of_fixed_addresses) { | ||
167 | BUG(); | ||
168 | return; | ||
169 | } | ||
170 | set_pte_pfn(address, phys >> PAGE_SHIFT, flags); | ||
171 | } | ||
172 | |||
173 | #if defined(CONFIG_HIGHPTE) | ||
174 | pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type) | ||
175 | { | ||
176 | pte_t *pte = kmap_atomic(pmd_page(*dir), type) + | ||
177 | ((pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK); | ||
178 | return &pte[pte_index(address)]; | ||
179 | } | ||
180 | #endif | ||
181 | |||
182 | /* | ||
183 | * List of all pgd's needed so it can invalidate entries in both cached | ||
184 | * and uncached pgd's. This is essentially codepath-based locking | ||
185 | * against pageattr.c; it is the unique case in which a valid change | ||
186 | * of kernel pagetables can't be lazily synchronized by vmalloc faults. | ||
187 | * vmalloc faults work because attached pagetables are never freed. | ||
188 | * The locking scheme was chosen on the basis of manfred's | ||
189 | * recommendations and having no core impact whatsoever. | ||
190 | * -- wli | ||
191 | */ | ||
192 | DEFINE_SPINLOCK(pgd_lock); | ||
193 | LIST_HEAD(pgd_list); | ||
194 | |||
195 | static inline void pgd_list_add(pgd_t *pgd) | ||
196 | { | ||
197 | list_add(pgd_to_list(pgd), &pgd_list); | ||
198 | } | ||
199 | |||
200 | static inline void pgd_list_del(pgd_t *pgd) | ||
201 | { | ||
202 | list_del(pgd_to_list(pgd)); | ||
203 | } | ||
204 | |||
205 | #define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET) | ||
206 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START) | ||
207 | |||
208 | static void pgd_ctor(pgd_t *pgd) | ||
209 | { | ||
210 | unsigned long flags; | ||
211 | |||
212 | memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t)); | ||
213 | spin_lock_irqsave(&pgd_lock, flags); | ||
214 | |||
215 | #ifndef __tilegx__ | ||
216 | /* | ||
217 | * Check that the user interrupt vector has no L2. | ||
218 | * It never should for the swapper, and new page tables | ||
219 | * should always start with an empty user interrupt vector. | ||
220 | */ | ||
221 | BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0); | ||
222 | #endif | ||
223 | |||
224 | clone_pgd_range(pgd + KERNEL_PGD_INDEX_START, | ||
225 | swapper_pg_dir + KERNEL_PGD_INDEX_START, | ||
226 | KERNEL_PGD_PTRS); | ||
227 | |||
228 | pgd_list_add(pgd); | ||
229 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
230 | } | ||
231 | |||
232 | static void pgd_dtor(pgd_t *pgd) | ||
233 | { | ||
234 | unsigned long flags; /* can be called from interrupt context */ | ||
235 | |||
236 | spin_lock_irqsave(&pgd_lock, flags); | ||
237 | pgd_list_del(pgd); | ||
238 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
239 | } | ||
240 | |||
241 | pgd_t *pgd_alloc(struct mm_struct *mm) | ||
242 | { | ||
243 | pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL); | ||
244 | if (pgd) | ||
245 | pgd_ctor(pgd); | ||
246 | return pgd; | ||
247 | } | ||
248 | |||
249 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) | ||
250 | { | ||
251 | pgd_dtor(pgd); | ||
252 | kmem_cache_free(pgd_cache, pgd); | ||
253 | } | ||
254 | |||
255 | |||
256 | #define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER) | ||
257 | |||
258 | struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) | ||
259 | { | ||
260 | int flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO|__GFP_COMP; | ||
261 | struct page *p; | ||
262 | |||
263 | #ifdef CONFIG_HIGHPTE | ||
264 | flags |= __GFP_HIGHMEM; | ||
265 | #endif | ||
266 | |||
267 | p = alloc_pages(flags, L2_USER_PGTABLE_ORDER); | ||
268 | if (p == NULL) | ||
269 | return NULL; | ||
270 | |||
271 | pgtable_page_ctor(p); | ||
272 | return p; | ||
273 | } | ||
274 | |||
275 | /* | ||
276 | * Free page immediately (used in __pte_alloc if we raced with another | ||
277 | * process). We have to correct whatever pte_alloc_one() did before | ||
278 | * returning the pages to the allocator. | ||
279 | */ | ||
280 | void pte_free(struct mm_struct *mm, struct page *p) | ||
281 | { | ||
282 | pgtable_page_dtor(p); | ||
283 | __free_pages(p, L2_USER_PGTABLE_ORDER); | ||
284 | } | ||
285 | |||
286 | void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, | ||
287 | unsigned long address) | ||
288 | { | ||
289 | int i; | ||
290 | |||
291 | pgtable_page_dtor(pte); | ||
292 | tlb->need_flush = 1; | ||
293 | if (tlb_fast_mode(tlb)) { | ||
294 | struct page *pte_pages[L2_USER_PGTABLE_PAGES]; | ||
295 | for (i = 0; i < L2_USER_PGTABLE_PAGES; ++i) | ||
296 | pte_pages[i] = pte + i; | ||
297 | free_pages_and_swap_cache(pte_pages, L2_USER_PGTABLE_PAGES); | ||
298 | return; | ||
299 | } | ||
300 | for (i = 0; i < L2_USER_PGTABLE_PAGES; ++i) { | ||
301 | tlb->pages[tlb->nr++] = pte + i; | ||
302 | if (tlb->nr >= FREE_PTE_NR) | ||
303 | tlb_flush_mmu(tlb, 0, 0); | ||
304 | } | ||
305 | } | ||
306 | |||
307 | #ifndef __tilegx__ | ||
308 | |||
309 | /* | ||
310 | * FIXME: needs to be atomic vs hypervisor writes. For now we make the | ||
311 | * window of vulnerability a bit smaller by doing an unlocked 8-bit update. | ||
312 | */ | ||
313 | int ptep_test_and_clear_young(struct vm_area_struct *vma, | ||
314 | unsigned long addr, pte_t *ptep) | ||
315 | { | ||
316 | #if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16 | ||
317 | # error Code assumes HV_PTE "accessed" bit in second byte | ||
318 | #endif | ||
319 | u8 *tmp = (u8 *)ptep; | ||
320 | u8 second_byte = tmp[1]; | ||
321 | if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8)))) | ||
322 | return 0; | ||
323 | tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8)); | ||
324 | return 1; | ||
325 | } | ||
326 | |||
327 | /* | ||
328 | * This implementation is atomic vs hypervisor writes, since the hypervisor | ||
329 | * always writes the low word (where "accessed" and "dirty" are) and this | ||
330 | * routine only writes the high word. | ||
331 | */ | ||
332 | void ptep_set_wrprotect(struct mm_struct *mm, | ||
333 | unsigned long addr, pte_t *ptep) | ||
334 | { | ||
335 | #if HV_PTE_INDEX_WRITABLE < 32 | ||
336 | # error Code assumes HV_PTE "writable" bit in high word | ||
337 | #endif | ||
338 | u32 *tmp = (u32 *)ptep; | ||
339 | tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32)); | ||
340 | } | ||
341 | |||
342 | #endif | ||
343 | |||
344 | pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr) | ||
345 | { | ||
346 | pgd_t *pgd; | ||
347 | pud_t *pud; | ||
348 | pmd_t *pmd; | ||
349 | |||
350 | if (pgd_addr_invalid(addr)) | ||
351 | return NULL; | ||
352 | |||
353 | pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr); | ||
354 | pud = pud_offset(pgd, addr); | ||
355 | if (!pud_present(*pud)) | ||
356 | return NULL; | ||
357 | pmd = pmd_offset(pud, addr); | ||
358 | if (pmd_huge_page(*pmd)) | ||
359 | return (pte_t *)pmd; | ||
360 | if (!pmd_present(*pmd)) | ||
361 | return NULL; | ||
362 | return pte_offset_kernel(pmd, addr); | ||
363 | } | ||
364 | |||
365 | pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu) | ||
366 | { | ||
367 | unsigned int width = smp_width; | ||
368 | int x = cpu % width; | ||
369 | int y = cpu / width; | ||
370 | BUG_ON(y >= smp_height); | ||
371 | BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3); | ||
372 | BUG_ON(cpu < 0 || cpu >= NR_CPUS); | ||
373 | BUG_ON(!cpu_is_valid_lotar(cpu)); | ||
374 | return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y)); | ||
375 | } | ||
376 | |||
377 | int get_remote_cache_cpu(pgprot_t prot) | ||
378 | { | ||
379 | HV_LOTAR lotar = hv_pte_get_lotar(prot); | ||
380 | int x = HV_LOTAR_X(lotar); | ||
381 | int y = HV_LOTAR_Y(lotar); | ||
382 | BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3); | ||
383 | return x + y * smp_width; | ||
384 | } | ||
385 | |||
386 | void set_pte_order(pte_t *ptep, pte_t pte, int order) | ||
387 | { | ||
388 | unsigned long pfn = pte_pfn(pte); | ||
389 | struct page *page = pfn_to_page(pfn); | ||
390 | |||
391 | /* Update the home of a PTE if necessary */ | ||
392 | pte = pte_set_home(pte, page_home(page)); | ||
393 | |||
394 | #ifdef __tilegx__ | ||
395 | *ptep = pte; | ||
396 | #else | ||
397 | /* | ||
398 | * When setting a PTE, write the high bits first, then write | ||
399 | * the low bits. This sets the "present" bit only after the | ||
400 | * other bits are in place. If a particular PTE update | ||
401 | * involves transitioning from one valid PTE to another, it | ||
402 | * may be necessary to call set_pte_order() more than once, | ||
403 | * transitioning via a suitable intermediate state. | ||
404 | * Note that this sequence also means that if we are transitioning | ||
405 | * from any migrating PTE to a non-migrating one, we will not | ||
406 | * see a half-updated PTE with the migrating bit off. | ||
407 | */ | ||
408 | #if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32 | ||
409 | # error Must write the present and migrating bits last | ||
410 | #endif | ||
411 | ((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32); | ||
412 | barrier(); | ||
413 | ((u32 *)ptep)[0] = (u32)(pte_val(pte)); | ||
414 | #endif | ||
415 | } | ||
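The comment in set_pte_order() above explains why, on the 32-bit chips, the high word of a PTE is written before the low word that carries the "present" bit. The standalone snippet below (not part of the patch) demonstrates just that ordering trick with an ordinary 64-bit value and a compiler barrier.

/*
 * Standalone demonstration of the ordering trick, not code from the
 * patch: write the high word first, then the low word that carries the
 * "present" bit, separated by a compiler barrier.
 */
#include <stdint.h>
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static void write_pte_high_then_low(volatile uint32_t ptep[2], uint64_t pte)
{
	ptep[1] = (uint32_t)(pte >> 32);  /* high word: pfn, home, etc.    */
	barrier();
	ptep[0] = (uint32_t)pte;          /* low word, including "present" */
}

int main(void)
{
	volatile uint32_t pte[2] = { 0, 0 };

	write_pte_high_then_low(pte, 0x123400000007ULL);  /* arbitrary value */
	printf("hi=%#x lo=%#x\n", (unsigned)pte[1], (unsigned)pte[0]);
	return 0;
}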
416 | |||
417 | /* Can this mm load a PTE with cached_priority set? */ | ||
418 | static inline int mm_is_priority_cached(struct mm_struct *mm) | ||
419 | { | ||
420 | return mm->context.priority_cached; | ||
421 | } | ||
422 | |||
423 | /* | ||
424 | * Add a priority mapping to an mm_context and | ||
425 | * notify the hypervisor if this is the first one. | ||
426 | */ | ||
427 | void start_mm_caching(struct mm_struct *mm) | ||
428 | { | ||
429 | if (!mm_is_priority_cached(mm)) { | ||
430 | mm->context.priority_cached = -1U; | ||
431 | hv_set_caching(-1U); | ||
432 | } | ||
433 | } | ||
434 | |||
435 | /* | ||
436 | * Validate and return the priority_cached flag. We know if it's zero | ||
437 | * that we don't need to scan, since we immediately set it non-zero | ||
438 | * when we first consider a MAP_CACHE_PRIORITY mapping. | ||
439 | * | ||
440 | * We only _try_ to acquire the mmap_sem semaphore; if we can't acquire | ||
441 | * it (we're in an interrupt context, servicing switch_mm), we don't | ||
442 | * worry about it and don't unset the "priority_cached" field. | ||
443 | * Presumably we'll come back later with more luck and clear the value | ||
444 | * then; for now we just keep the cache marked for priority. | ||
445 | */ | ||
446 | static unsigned int update_priority_cached(struct mm_struct *mm) | ||
447 | { | ||
448 | if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) { | ||
449 | struct vm_area_struct *vm; | ||
450 | for (vm = mm->mmap; vm; vm = vm->vm_next) { | ||
451 | if (hv_pte_get_cached_priority(vm->vm_page_prot)) | ||
452 | break; | ||
453 | } | ||
454 | if (vm == NULL) | ||
455 | mm->context.priority_cached = 0; | ||
456 | up_write(&mm->mmap_sem); | ||
457 | } | ||
458 | return mm->context.priority_cached; | ||
459 | } | ||
460 | |||
461 | /* Set caching correctly for an mm that we are switching to. */ | ||
462 | void check_mm_caching(struct mm_struct *prev, struct mm_struct *next) | ||
463 | { | ||
464 | if (!mm_is_priority_cached(next)) { | ||
465 | /* | ||
466 | * If the new mm doesn't use priority caching, just see if we | ||
467 | * need the hv_set_caching(), or can assume it's already zero. | ||
468 | */ | ||
469 | if (mm_is_priority_cached(prev)) | ||
470 | hv_set_caching(0); | ||
471 | } else { | ||
472 | hv_set_caching(update_priority_cached(next)); | ||
473 | } | ||
474 | } | ||
475 | |||
476 | #if CHIP_HAS_MMIO() | ||
477 | |||
478 | /* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */ | ||
479 | void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, | ||
480 | pgprot_t home) | ||
481 | { | ||
482 | void *addr; | ||
483 | struct vm_struct *area; | ||
484 | unsigned long offset, last_addr; | ||
485 | pgprot_t pgprot; | ||
486 | |||
487 | /* Don't allow wraparound or zero size */ | ||
488 | last_addr = phys_addr + size - 1; | ||
489 | if (!size || last_addr < phys_addr) | ||
490 | return NULL; | ||
491 | |||
492 | /* Create a read/write, MMIO VA mapping homed at the requested shim. */ | ||
493 | pgprot = PAGE_KERNEL; | ||
494 | pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO); | ||
495 | pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home)); | ||
496 | |||
497 | /* | ||
498 | * Mappings have to be page-aligned | ||
499 | */ | ||
500 | offset = phys_addr & ~PAGE_MASK; | ||
501 | phys_addr &= PAGE_MASK; | ||
502 | size = PAGE_ALIGN(last_addr+1) - phys_addr; | ||
503 | |||
504 | /* | ||
505 | * Ok, go for it.. | ||
506 | */ | ||
507 | area = get_vm_area(size, VM_IOREMAP /* | other flags? */); | ||
508 | if (!area) | ||
509 | return NULL; | ||
510 | area->phys_addr = phys_addr; | ||
511 | addr = area->addr; | ||
512 | if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, | ||
513 | phys_addr, pgprot)) { | ||
514 | remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr)); | ||
515 | return NULL; | ||
516 | } | ||
517 | return (__force void __iomem *) (offset + (char *)addr); | ||
518 | } | ||
519 | EXPORT_SYMBOL(ioremap_prot); | ||
520 | |||
521 | /* Map a PCI MMIO bus address into VA space. */ | ||
522 | void __iomem *ioremap(resource_size_t phys_addr, unsigned long size) | ||
523 | { | ||
524 | panic("ioremap for PCI MMIO is not supported"); | ||
525 | } | ||
526 | EXPORT_SYMBOL(ioremap); | ||
527 | |||
528 | /* Unmap an MMIO VA mapping. */ | ||
529 | void iounmap(volatile void __iomem *addr_in) | ||
530 | { | ||
531 | volatile void __iomem *addr = (volatile void __iomem *) | ||
532 | (PAGE_MASK & (unsigned long __force)addr_in); | ||
533 | #if 1 | ||
534 | vunmap((void * __force)addr); | ||
535 | #else | ||
536 | /* x86 uses this complicated flow instead of vunmap(). Is | ||
537 | * there any particular reason we should do the same? */ | ||
538 | struct vm_struct *p, *o; | ||
539 | |||
540 | /* Use the vm area unlocked, assuming the caller | ||
541 | ensures there isn't another iounmap for the same address | ||
542 | in parallel. Reuse of the virtual address is prevented by | ||
543 | leaving it in the global lists until we're done with it. | ||
544 | cpa takes care of the direct mappings. */ | ||
545 | read_lock(&vmlist_lock); | ||
546 | for (p = vmlist; p; p = p->next) { | ||
547 | if (p->addr == addr) | ||
548 | break; | ||
549 | } | ||
550 | read_unlock(&vmlist_lock); | ||
551 | |||
552 | if (!p) { | ||
553 | printk("iounmap: bad address %p\n", addr); | ||
554 | dump_stack(); | ||
555 | return; | ||
556 | } | ||
557 | |||
558 | /* Finally remove it */ | ||
559 | o = remove_vm_area((void *)addr); | ||
560 | BUG_ON(p != o || o == NULL); | ||
561 | kfree(p); | ||
562 | #endif | ||
563 | } | ||
564 | EXPORT_SYMBOL(iounmap); | ||
565 | |||
566 | #endif /* CHIP_HAS_MMIO() */ | ||