author    Chris Metcalf <cmetcalf@tilera.com>  2010-05-28 23:09:12 -0400
committer Chris Metcalf <cmetcalf@tilera.com>  2010-06-04 17:11:18 -0400
commit    867e359b97c970a60626d5d76bbe2a8fadbf38fb (patch)
tree      c5ccbb7f5172e8555977119608ecb1eee3cc37e3 /arch/tile/include
parent    5360bd776f73d0a7da571d72a09a03f237e99900 (diff)
arch/tile: core support for Tilera 32-bit chips.
This change is the core kernel support for TILEPro and TILE64 chips.
No driver support (except the console driver) is included yet.
This includes the relevant Linux headers in asm/; the low-level
"Tile architecture" headers in arch/, which are
shared with the hypervisor, etc., and are build-system agnostic;
and the relevant hypervisor headers in hv/.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Reviewed-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/tile/include')
128 files changed, 16093 insertions, 0 deletions
diff --git a/arch/tile/include/arch/abi.h b/arch/tile/include/arch/abi.h
new file mode 100644
index 000000000000..7cdc47b3e02a
--- /dev/null
+++ b/arch/tile/include/arch/abi.h
@@ -0,0 +1,93 @@
1 | // Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
2 | // | ||
3 | // This program is free software; you can redistribute it and/or | ||
4 | // modify it under the terms of the GNU General Public License | ||
5 | // as published by the Free Software Foundation, version 2. | ||
6 | // | ||
7 | // This program is distributed in the hope that it will be useful, but | ||
8 | // WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | // MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
10 | // NON INFRINGEMENT. See the GNU General Public License for | ||
11 | // more details. | ||
12 | |||
13 | //! @file | ||
14 | //! | ||
15 | //! ABI-related register definitions helpful when writing assembly code. | ||
16 | //! | ||
17 | |||
18 | #ifndef __ARCH_ABI_H__ | ||
19 | #define __ARCH_ABI_H__ | ||
20 | |||
21 | #include <arch/chip.h> | ||
22 | |||
23 | // Registers 0 - 55 are "normal", but some perform special roles. | ||
24 | |||
25 | #define TREG_FP 52 /**< Frame pointer. */ | ||
26 | #define TREG_TP 53 /**< Thread pointer. */ | ||
27 | #define TREG_SP 54 /**< Stack pointer. */ | ||
28 | #define TREG_LR 55 /**< Link to calling function PC. */ | ||
29 | |||
30 | /** Index of last normal general-purpose register. */ | ||
31 | #define TREG_LAST_GPR 55 | ||
32 | |||
33 | // Registers 56 - 62 are "special" network registers. | ||
34 | |||
35 | #define TREG_SN 56 /**< Static network access. */ | ||
36 | #define TREG_IDN0 57 /**< IDN demux 0 access. */ | ||
37 | #define TREG_IDN1 58 /**< IDN demux 1 access. */ | ||
38 | #define TREG_UDN0 59 /**< UDN demux 0 access. */ | ||
39 | #define TREG_UDN1 60 /**< UDN demux 1 access. */ | ||
40 | #define TREG_UDN2 61 /**< UDN demux 2 access. */ | ||
41 | #define TREG_UDN3 62 /**< UDN demux 3 access. */ | ||
42 | |||
43 | // Register 63 is the "special" zero register. | ||
44 | |||
45 | #define TREG_ZERO 63 /**< "Zero" register; always reads as "0". */ | ||
46 | |||
47 | |||
48 | /** By convention, this register is used to hold the syscall number. */ | ||
49 | #define TREG_SYSCALL_NR 10 | ||
50 | |||
51 | /** Name of register that holds the syscall number, for use in assembly. */ | ||
52 | #define TREG_SYSCALL_NR_NAME r10 | ||
53 | |||
54 | |||
55 | //! The ABI requires callers to allocate a caller state save area of | ||
56 | //! this many bytes at the bottom of each stack frame. | ||
57 | //! | ||
58 | #ifdef __tile__ | ||
59 | #define C_ABI_SAVE_AREA_SIZE (2 * __SIZEOF_POINTER__) | ||
60 | #endif | ||
61 | |||
62 | //! The operand to an 'info' opcode directing the backtracer to not | ||
63 | //! try to find the calling frame. | ||
64 | //! | ||
65 | #define INFO_OP_CANNOT_BACKTRACE 2 | ||
66 | |||
67 | #ifndef __ASSEMBLER__ | ||
68 | #if CHIP_WORD_SIZE() > 32 | ||
69 | |||
70 | //! Unsigned type that can hold a register. | ||
71 | typedef unsigned long long uint_reg_t; | ||
72 | |||
73 | //! Signed type that can hold a register. | ||
74 | typedef long long int_reg_t; | ||
75 | |||
76 | //! String prefix to use for printf(). | ||
77 | #define INT_REG_FMT "ll" | ||
78 | |||
79 | #elif !defined(__LP64__) /* avoid confusion with LP64 cross-build tools */ | ||
80 | |||
81 | //! Unsigned type that can hold a register. | ||
82 | typedef unsigned long uint_reg_t; | ||
83 | |||
84 | //! Signed type that can hold a register. | ||
85 | typedef long int_reg_t; | ||
86 | |||
87 | //! String prefix to use for printf(). | ||
88 | #define INT_REG_FMT "l" | ||
89 | |||
90 | #endif | ||
91 | #endif /* __ASSEMBLER__ */ | ||
92 | |||
93 | #endif // !__ARCH_ABI_H__ | ||
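
The register-width typedefs and the INT_REG_FMT prefix above exist so that code can hold and print register values portably across 32-bit and 64-bit Tile chips. A minimal sketch of how a consumer might use them (the print_reg() helper is illustrative, not part of this header):

#include <stdio.h>

#include <arch/abi.h>

/* Print a register value with the width-appropriate printf() prefix. */
static void print_reg(int regno, uint_reg_t value)
{
	printf("r%d = %#" INT_REG_FMT "x\n", regno, value);
}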
diff --git a/arch/tile/include/arch/chip.h b/arch/tile/include/arch/chip.h
new file mode 100644
index 000000000000..926d3db0e91e
--- /dev/null
+++ b/arch/tile/include/arch/chip.h
@@ -0,0 +1,23 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #if __tile_chip__ == 0 | ||
16 | #include <arch/chip_tile64.h> | ||
17 | #elif __tile_chip__ == 1 | ||
18 | #include <arch/chip_tilepro.h> | ||
19 | #elif defined(__tilegx__) | ||
20 | #include <arch/chip_tilegx.h> | ||
21 | #else | ||
22 | #error Unexpected Tilera chip type | ||
23 | #endif | ||
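
Including <arch/chip.h> rather than a specific chip_*.h file lets the preprocessor pick the right definitions from the compiler-provided __tile_chip__ / __tilegx__ macros, so generic code stays chip-neutral. A small sketch under that assumption (chip_pa_t and RING_BYTES are names invented here for illustration):

#include <arch/chip.h>

/* Size a DMA ring as a whole number of L2 cache lines, whichever
 * per-chip header ends up being included.
 */
#define RING_BYTES (16 * CHIP_L2_LINE_SIZE())

/* A machine word cannot hold a full physical address on these chips
 * (CHIP_PA_WIDTH() is 36), so carry PAs in a 64-bit type when needed.
 */
#if CHIP_PA_WIDTH() > CHIP_WORD_SIZE()
typedef unsigned long long chip_pa_t;
#else
typedef unsigned long chip_pa_t;
#endif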
diff --git a/arch/tile/include/arch/chip_tile64.h b/arch/tile/include/arch/chip_tile64.h
new file mode 100644
index 000000000000..18b5bc8e563f
--- /dev/null
+++ b/arch/tile/include/arch/chip_tile64.h
@@ -0,0 +1,252 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * @file | ||
17 | * Global header file. | ||
18 | * This header file specifies defines for TILE64. | ||
19 | */ | ||
20 | |||
21 | #ifndef __ARCH_CHIP_H__ | ||
22 | #define __ARCH_CHIP_H__ | ||
23 | |||
24 | /** Specify chip version. | ||
25 | * When possible, prefer the CHIP_xxx symbols below for future-proofing. | ||
26 | * This is intended for cross-compiling; native compilation should | ||
27 | * use the predefined __tile_chip__ symbol. | ||
28 | */ | ||
29 | #define TILE_CHIP 0 | ||
30 | |||
31 | /** Specify chip revision. | ||
32 | * This provides for the case of a respin of a particular chip type; | ||
33 | * the normal value for this symbol is "0". | ||
34 | * This is intended for cross-compiling; native compilation should | ||
35 | * use the predefined __tile_chip_rev__ symbol. | ||
36 | */ | ||
37 | #define TILE_CHIP_REV 0 | ||
38 | |||
39 | /** The name of this architecture. */ | ||
40 | #define CHIP_ARCH_NAME "tile64" | ||
41 | |||
42 | /** The ELF e_machine type for binaries for this chip. */ | ||
43 | #define CHIP_ELF_TYPE() EM_TILE64 | ||
44 | |||
45 | /** The alternate ELF e_machine type for binaries for this chip. */ | ||
46 | #define CHIP_COMPAT_ELF_TYPE() 0x2506 | ||
47 | |||
48 | /** What is the native word size of the machine? */ | ||
49 | #define CHIP_WORD_SIZE() 32 | ||
50 | |||
51 | /** How many bits of a virtual address are used. Extra bits must be | ||
52 | * the sign extension of the low bits. | ||
53 | */ | ||
54 | #define CHIP_VA_WIDTH() 32 | ||
55 | |||
56 | /** How many bits are in a physical address? */ | ||
57 | #define CHIP_PA_WIDTH() 36 | ||
58 | |||
59 | /** Size of the L2 cache, in bytes. */ | ||
60 | #define CHIP_L2_CACHE_SIZE() 65536 | ||
61 | |||
62 | /** Log size of an L2 cache line in bytes. */ | ||
63 | #define CHIP_L2_LOG_LINE_SIZE() 6 | ||
64 | |||
65 | /** Size of an L2 cache line, in bytes. */ | ||
66 | #define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE()) | ||
67 | |||
68 | /** Associativity of the L2 cache. */ | ||
69 | #define CHIP_L2_ASSOC() 2 | ||
70 | |||
71 | /** Size of the L1 data cache, in bytes. */ | ||
72 | #define CHIP_L1D_CACHE_SIZE() 8192 | ||
73 | |||
74 | /** Log size of an L1 data cache line in bytes. */ | ||
75 | #define CHIP_L1D_LOG_LINE_SIZE() 4 | ||
76 | |||
77 | /** Size of an L1 data cache line, in bytes. */ | ||
78 | #define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE()) | ||
79 | |||
80 | /** Associativity of the L1 data cache. */ | ||
81 | #define CHIP_L1D_ASSOC() 2 | ||
82 | |||
83 | /** Size of the L1 instruction cache, in bytes. */ | ||
84 | #define CHIP_L1I_CACHE_SIZE() 8192 | ||
85 | |||
86 | /** Log size of an L1 instruction cache line in bytes. */ | ||
87 | #define CHIP_L1I_LOG_LINE_SIZE() 6 | ||
88 | |||
89 | /** Size of an L1 instruction cache line, in bytes. */ | ||
90 | #define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE()) | ||
91 | |||
92 | /** Associativity of the L1 instruction cache. */ | ||
93 | #define CHIP_L1I_ASSOC() 1 | ||
94 | |||
95 | /** Stride with which flush instructions must be issued. */ | ||
96 | #define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE() | ||
97 | |||
98 | /** Stride with which inv instructions must be issued. */ | ||
99 | #define CHIP_INV_STRIDE() CHIP_L1D_LINE_SIZE() | ||
100 | |||
101 | /** Stride with which finv instructions must be issued. */ | ||
102 | #define CHIP_FINV_STRIDE() CHIP_L1D_LINE_SIZE() | ||
103 | |||
104 | /** Can the local cache coherently cache data that is homed elsewhere? */ | ||
105 | #define CHIP_HAS_COHERENT_LOCAL_CACHE() 0 | ||
106 | |||
107 | /** How many simultaneous outstanding victims can the L2 cache have? */ | ||
108 | #define CHIP_MAX_OUTSTANDING_VICTIMS() 2 | ||
109 | |||
110 | /** Does the TLB support the NC and NOALLOC bits? */ | ||
111 | #define CHIP_HAS_NC_AND_NOALLOC_BITS() 0 | ||
112 | |||
113 | /** Does the chip support hash-for-home caching? */ | ||
114 | #define CHIP_HAS_CBOX_HOME_MAP() 0 | ||
115 | |||
116 | /** Number of entries in the chip's home map tables. */ | ||
117 | /* #define CHIP_CBOX_HOME_MAP_SIZE() -- does not apply to chip 0 */ | ||
118 | |||
119 | /** Do uncacheable requests miss in the cache regardless of whether | ||
120 | * there is matching data? */ | ||
121 | #define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 0 | ||
122 | |||
123 | /** Does the mf instruction wait for victims? */ | ||
124 | #define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 1 | ||
125 | |||
126 | /** Does the chip have an "inv" instruction that doesn't also flush? */ | ||
127 | #define CHIP_HAS_INV() 0 | ||
128 | |||
129 | /** Does the chip have a "wh64" instruction? */ | ||
130 | #define CHIP_HAS_WH64() 0 | ||
131 | |||
132 | /** Does this chip have a 'dword_align' instruction? */ | ||
133 | #define CHIP_HAS_DWORD_ALIGN() 0 | ||
134 | |||
135 | /** Number of performance counters. */ | ||
136 | #define CHIP_PERFORMANCE_COUNTERS() 2 | ||
137 | |||
138 | /** Does this chip have auxiliary performance counters? */ | ||
139 | #define CHIP_HAS_AUX_PERF_COUNTERS() 0 | ||
140 | |||
141 | /** Is the CBOX_MSR1 SPR supported? */ | ||
142 | #define CHIP_HAS_CBOX_MSR1() 0 | ||
143 | |||
144 | /** Is the TILE_RTF_HWM SPR supported? */ | ||
145 | #define CHIP_HAS_TILE_RTF_HWM() 0 | ||
146 | |||
147 | /** Is the TILE_WRITE_PENDING SPR supported? */ | ||
148 | #define CHIP_HAS_TILE_WRITE_PENDING() 0 | ||
149 | |||
150 | /** Is the PROC_STATUS SPR supported? */ | ||
151 | #define CHIP_HAS_PROC_STATUS_SPR() 0 | ||
152 | |||
153 | /** Log of the number of mshims we have. */ | ||
154 | #define CHIP_LOG_NUM_MSHIMS() 2 | ||
155 | |||
156 | /** Are the bases of the interrupt vector areas fixed? */ | ||
157 | #define CHIP_HAS_FIXED_INTVEC_BASE() 1 | ||
158 | |||
159 | /** Are the interrupt masks split up into 2 SPRs? */ | ||
160 | #define CHIP_HAS_SPLIT_INTR_MASK() 1 | ||
161 | |||
162 | /** Is the cycle count split up into 2 SPRs? */ | ||
163 | #define CHIP_HAS_SPLIT_CYCLE() 1 | ||
164 | |||
165 | /** Does the chip have a static network? */ | ||
166 | #define CHIP_HAS_SN() 1 | ||
167 | |||
168 | /** Does the chip have a static network processor? */ | ||
169 | #define CHIP_HAS_SN_PROC() 1 | ||
170 | |||
171 | /** Size of the L1 static network processor instruction cache, in bytes. */ | ||
172 | #define CHIP_L1SNI_CACHE_SIZE() 2048 | ||
173 | |||
174 | /** Does the chip have DMA support in each tile? */ | ||
175 | #define CHIP_HAS_TILE_DMA() 1 | ||
176 | |||
177 | /** Does the chip have the second revision of the directly accessible | ||
178 | * dynamic networks? This encapsulates a number of characteristics, | ||
179 | * including the absence of the catch-all, the absence of inline message | ||
180 | * tags, the absence of support for network context-switching, and so on. | ||
181 | */ | ||
182 | #define CHIP_HAS_REV1_XDN() 0 | ||
183 | |||
184 | /** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */ | ||
185 | #define CHIP_HAS_CMPEXCH() 0 | ||
186 | |||
187 | /** Does the chip have memory-mapped I/O support? */ | ||
188 | #define CHIP_HAS_MMIO() 0 | ||
189 | |||
190 | /** Does the chip have post-completion interrupts? */ | ||
191 | #define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0 | ||
192 | |||
193 | /** Does the chip have native single step support? */ | ||
194 | #define CHIP_HAS_SINGLE_STEP() 0 | ||
195 | |||
196 | #ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */ | ||
197 | |||
198 | /** How many entries are present in the instruction TLB? */ | ||
199 | #define CHIP_ITLB_ENTRIES() 8 | ||
200 | |||
201 | /** How many entries are present in the data TLB? */ | ||
202 | #define CHIP_DTLB_ENTRIES() 16 | ||
203 | |||
204 | /** How many MAF entries does the XAUI shim have? */ | ||
205 | #define CHIP_XAUI_MAF_ENTRIES() 16 | ||
206 | |||
207 | /** Does the memory shim have a source-id table? */ | ||
208 | #define CHIP_HAS_MSHIM_SRCID_TABLE() 1 | ||
209 | |||
210 | /** Does the L1 instruction cache clear on reset? */ | ||
211 | #define CHIP_HAS_L1I_CLEAR_ON_RESET() 0 | ||
212 | |||
213 | /** Does the chip come out of reset with valid coordinates on all tiles? | ||
214 | * Note that if defined, this also implies that the upper left is 1,1. | ||
215 | */ | ||
216 | #define CHIP_HAS_VALID_TILE_COORD_RESET() 0 | ||
217 | |||
218 | /** Does the chip have unified packet formats? */ | ||
219 | #define CHIP_HAS_UNIFIED_PACKET_FORMATS() 0 | ||
220 | |||
221 | /** Does the chip support write reordering? */ | ||
222 | #define CHIP_HAS_WRITE_REORDERING() 0 | ||
223 | |||
224 | /** Does the chip support Y-X routing as well as X-Y? */ | ||
225 | #define CHIP_HAS_Y_X_ROUTING() 0 | ||
226 | |||
227 | /** Is INTCTRL_3 managed with the correct MPL? */ | ||
228 | #define CHIP_HAS_INTCTRL_3_STATUS_FIX() 0 | ||
229 | |||
230 | /** Is it possible to configure the chip to be big-endian? */ | ||
231 | #define CHIP_HAS_BIG_ENDIAN_CONFIG() 0 | ||
232 | |||
233 | /** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */ | ||
234 | #define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0 | ||
235 | |||
236 | /** Is the DIAG_TRACE_WAY SPR supported? */ | ||
237 | #define CHIP_HAS_DIAG_TRACE_WAY() 0 | ||
238 | |||
239 | /** Is the MEM_STRIPE_CONFIG SPR supported? */ | ||
240 | #define CHIP_HAS_MEM_STRIPE_CONFIG() 0 | ||
241 | |||
242 | /** Are the TLB_PERF SPRs supported? */ | ||
243 | #define CHIP_HAS_TLB_PERF() 0 | ||
244 | |||
245 | /** Is the VDN_SNOOP_SHIM_CTL SPR supported? */ | ||
246 | #define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0 | ||
247 | |||
248 | /** Does the chip support rev1 DMA packets? */ | ||
249 | #define CHIP_HAS_REV1_DMA_PACKETS() 0 | ||
250 | |||
251 | #endif /* !__OPEN_SOURCE__ */ | ||
252 | #endif /* __ARCH_CHIP_H__ */ | ||
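
The cache-geometry and stride macros above are what generic cache-maintenance loops key off: a flush loop must step by the chip-specific stride rather than a hard-coded line size. A sketch under the assumption of a per-line flush primitive (flush_line() here is a placeholder for whatever intrinsic or helper the surrounding code provides):

#include <arch/chip.h>

/* Flush [addr, addr + size) from the cache, stepping by the chip's
 * required flush stride.  flush_line() stands in for the per-line
 * flush primitive supplied by the surrounding code.
 */
static inline void flush_buffer(const void *addr, unsigned long size)
{
	const char *p = (const char *)
		((unsigned long)addr & -(unsigned long)CHIP_FLUSH_STRIDE());
	const char *end = (const char *)addr + size;

	for (; p < end; p += CHIP_FLUSH_STRIDE())
		flush_line(p);
}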
diff --git a/arch/tile/include/arch/chip_tilepro.h b/arch/tile/include/arch/chip_tilepro.h
new file mode 100644
index 000000000000..9852af163862
--- /dev/null
+++ b/arch/tile/include/arch/chip_tilepro.h
@@ -0,0 +1,252 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * @file | ||
17 | * Global header file. | ||
18 | * This header file specifies defines for TILEPro. | ||
19 | */ | ||
20 | |||
21 | #ifndef __ARCH_CHIP_H__ | ||
22 | #define __ARCH_CHIP_H__ | ||
23 | |||
24 | /** Specify chip version. | ||
25 | * When possible, prefer the CHIP_xxx symbols below for future-proofing. | ||
26 | * This is intended for cross-compiling; native compilation should | ||
27 | * use the predefined __tile_chip__ symbol. | ||
28 | */ | ||
29 | #define TILE_CHIP 1 | ||
30 | |||
31 | /** Specify chip revision. | ||
32 | * This provides for the case of a respin of a particular chip type; | ||
33 | * the normal value for this symbol is "0". | ||
34 | * This is intended for cross-compiling; native compilation should | ||
35 | * use the predefined __tile_chip_rev__ symbol. | ||
36 | */ | ||
37 | #define TILE_CHIP_REV 0 | ||
38 | |||
39 | /** The name of this architecture. */ | ||
40 | #define CHIP_ARCH_NAME "tilepro" | ||
41 | |||
42 | /** The ELF e_machine type for binaries for this chip. */ | ||
43 | #define CHIP_ELF_TYPE() EM_TILEPRO | ||
44 | |||
45 | /** The alternate ELF e_machine type for binaries for this chip. */ | ||
46 | #define CHIP_COMPAT_ELF_TYPE() 0x2507 | ||
47 | |||
48 | /** What is the native word size of the machine? */ | ||
49 | #define CHIP_WORD_SIZE() 32 | ||
50 | |||
51 | /** How many bits of a virtual address are used. Extra bits must be | ||
52 | * the sign extension of the low bits. | ||
53 | */ | ||
54 | #define CHIP_VA_WIDTH() 32 | ||
55 | |||
56 | /** How many bits are in a physical address? */ | ||
57 | #define CHIP_PA_WIDTH() 36 | ||
58 | |||
59 | /** Size of the L2 cache, in bytes. */ | ||
60 | #define CHIP_L2_CACHE_SIZE() 65536 | ||
61 | |||
62 | /** Log size of an L2 cache line in bytes. */ | ||
63 | #define CHIP_L2_LOG_LINE_SIZE() 6 | ||
64 | |||
65 | /** Size of an L2 cache line, in bytes. */ | ||
66 | #define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE()) | ||
67 | |||
68 | /** Associativity of the L2 cache. */ | ||
69 | #define CHIP_L2_ASSOC() 4 | ||
70 | |||
71 | /** Size of the L1 data cache, in bytes. */ | ||
72 | #define CHIP_L1D_CACHE_SIZE() 8192 | ||
73 | |||
74 | /** Log size of an L1 data cache line in bytes. */ | ||
75 | #define CHIP_L1D_LOG_LINE_SIZE() 4 | ||
76 | |||
77 | /** Size of an L1 data cache line, in bytes. */ | ||
78 | #define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE()) | ||
79 | |||
80 | /** Associativity of the L1 data cache. */ | ||
81 | #define CHIP_L1D_ASSOC() 2 | ||
82 | |||
83 | /** Size of the L1 instruction cache, in bytes. */ | ||
84 | #define CHIP_L1I_CACHE_SIZE() 16384 | ||
85 | |||
86 | /** Log size of an L1 instruction cache line in bytes. */ | ||
87 | #define CHIP_L1I_LOG_LINE_SIZE() 6 | ||
88 | |||
89 | /** Size of an L1 instruction cache line, in bytes. */ | ||
90 | #define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE()) | ||
91 | |||
92 | /** Associativity of the L1 instruction cache. */ | ||
93 | #define CHIP_L1I_ASSOC() 1 | ||
94 | |||
95 | /** Stride with which flush instructions must be issued. */ | ||
96 | #define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE() | ||
97 | |||
98 | /** Stride with which inv instructions must be issued. */ | ||
99 | #define CHIP_INV_STRIDE() CHIP_L2_LINE_SIZE() | ||
100 | |||
101 | /** Stride with which finv instructions must be issued. */ | ||
102 | #define CHIP_FINV_STRIDE() CHIP_L2_LINE_SIZE() | ||
103 | |||
104 | /** Can the local cache coherently cache data that is homed elsewhere? */ | ||
105 | #define CHIP_HAS_COHERENT_LOCAL_CACHE() 1 | ||
106 | |||
107 | /** How many simultaneous outstanding victims can the L2 cache have? */ | ||
108 | #define CHIP_MAX_OUTSTANDING_VICTIMS() 4 | ||
109 | |||
110 | /** Does the TLB support the NC and NOALLOC bits? */ | ||
111 | #define CHIP_HAS_NC_AND_NOALLOC_BITS() 1 | ||
112 | |||
113 | /** Does the chip support hash-for-home caching? */ | ||
114 | #define CHIP_HAS_CBOX_HOME_MAP() 1 | ||
115 | |||
116 | /** Number of entries in the chip's home map tables. */ | ||
117 | #define CHIP_CBOX_HOME_MAP_SIZE() 64 | ||
118 | |||
119 | /** Do uncacheable requests miss in the cache regardless of whether | ||
120 | * there is matching data? */ | ||
121 | #define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 1 | ||
122 | |||
123 | /** Does the mf instruction wait for victims? */ | ||
124 | #define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 0 | ||
125 | |||
126 | /** Does the chip have an "inv" instruction that doesn't also flush? */ | ||
127 | #define CHIP_HAS_INV() 1 | ||
128 | |||
129 | /** Does the chip have a "wh64" instruction? */ | ||
130 | #define CHIP_HAS_WH64() 1 | ||
131 | |||
132 | /** Does this chip have a 'dword_align' instruction? */ | ||
133 | #define CHIP_HAS_DWORD_ALIGN() 1 | ||
134 | |||
135 | /** Number of performance counters. */ | ||
136 | #define CHIP_PERFORMANCE_COUNTERS() 4 | ||
137 | |||
138 | /** Does this chip have auxiliary performance counters? */ | ||
139 | #define CHIP_HAS_AUX_PERF_COUNTERS() 1 | ||
140 | |||
141 | /** Is the CBOX_MSR1 SPR supported? */ | ||
142 | #define CHIP_HAS_CBOX_MSR1() 1 | ||
143 | |||
144 | /** Is the TILE_RTF_HWM SPR supported? */ | ||
145 | #define CHIP_HAS_TILE_RTF_HWM() 1 | ||
146 | |||
147 | /** Is the TILE_WRITE_PENDING SPR supported? */ | ||
148 | #define CHIP_HAS_TILE_WRITE_PENDING() 1 | ||
149 | |||
150 | /** Is the PROC_STATUS SPR supported? */ | ||
151 | #define CHIP_HAS_PROC_STATUS_SPR() 1 | ||
152 | |||
153 | /** Log of the number of mshims we have. */ | ||
154 | #define CHIP_LOG_NUM_MSHIMS() 2 | ||
155 | |||
156 | /** Are the bases of the interrupt vector areas fixed? */ | ||
157 | #define CHIP_HAS_FIXED_INTVEC_BASE() 1 | ||
158 | |||
159 | /** Are the interrupt masks split up into 2 SPRs? */ | ||
160 | #define CHIP_HAS_SPLIT_INTR_MASK() 1 | ||
161 | |||
162 | /** Is the cycle count split up into 2 SPRs? */ | ||
163 | #define CHIP_HAS_SPLIT_CYCLE() 1 | ||
164 | |||
165 | /** Does the chip have a static network? */ | ||
166 | #define CHIP_HAS_SN() 1 | ||
167 | |||
168 | /** Does the chip have a static network processor? */ | ||
169 | #define CHIP_HAS_SN_PROC() 0 | ||
170 | |||
171 | /** Size of the L1 static network processor instruction cache, in bytes. */ | ||
172 | /* #define CHIP_L1SNI_CACHE_SIZE() -- does not apply to chip 1 */ | ||
173 | |||
174 | /** Does the chip have DMA support in each tile? */ | ||
175 | #define CHIP_HAS_TILE_DMA() 1 | ||
176 | |||
177 | /** Does the chip have the second revision of the directly accessible | ||
178 | * dynamic networks? This encapsulates a number of characteristics, | ||
179 | * including the absence of the catch-all, the absence of inline message | ||
180 | * tags, the absence of support for network context-switching, and so on. | ||
181 | */ | ||
182 | #define CHIP_HAS_REV1_XDN() 0 | ||
183 | |||
184 | /** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */ | ||
185 | #define CHIP_HAS_CMPEXCH() 0 | ||
186 | |||
187 | /** Does the chip have memory-mapped I/O support? */ | ||
188 | #define CHIP_HAS_MMIO() 0 | ||
189 | |||
190 | /** Does the chip have post-completion interrupts? */ | ||
191 | #define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0 | ||
192 | |||
193 | /** Does the chip have native single step support? */ | ||
194 | #define CHIP_HAS_SINGLE_STEP() 0 | ||
195 | |||
196 | #ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */ | ||
197 | |||
198 | /** How many entries are present in the instruction TLB? */ | ||
199 | #define CHIP_ITLB_ENTRIES() 16 | ||
200 | |||
201 | /** How many entries are present in the data TLB? */ | ||
202 | #define CHIP_DTLB_ENTRIES() 16 | ||
203 | |||
204 | /** How many MAF entries does the XAUI shim have? */ | ||
205 | #define CHIP_XAUI_MAF_ENTRIES() 32 | ||
206 | |||
207 | /** Does the memory shim have a source-id table? */ | ||
208 | #define CHIP_HAS_MSHIM_SRCID_TABLE() 0 | ||
209 | |||
210 | /** Does the L1 instruction cache clear on reset? */ | ||
211 | #define CHIP_HAS_L1I_CLEAR_ON_RESET() 1 | ||
212 | |||
213 | /** Does the chip come out of reset with valid coordinates on all tiles? | ||
214 | * Note that if defined, this also implies that the upper left is 1,1. | ||
215 | */ | ||
216 | #define CHIP_HAS_VALID_TILE_COORD_RESET() 1 | ||
217 | |||
218 | /** Does the chip have unified packet formats? */ | ||
219 | #define CHIP_HAS_UNIFIED_PACKET_FORMATS() 1 | ||
220 | |||
221 | /** Does the chip support write reordering? */ | ||
222 | #define CHIP_HAS_WRITE_REORDERING() 1 | ||
223 | |||
224 | /** Does the chip support Y-X routing as well as X-Y? */ | ||
225 | #define CHIP_HAS_Y_X_ROUTING() 1 | ||
226 | |||
227 | /** Is INTCTRL_3 managed with the correct MPL? */ | ||
228 | #define CHIP_HAS_INTCTRL_3_STATUS_FIX() 1 | ||
229 | |||
230 | /** Is it possible to configure the chip to be big-endian? */ | ||
231 | #define CHIP_HAS_BIG_ENDIAN_CONFIG() 1 | ||
232 | |||
233 | /** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */ | ||
234 | #define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 1 | ||
235 | |||
236 | /** Is the DIAG_TRACE_WAY SPR supported? */ | ||
237 | #define CHIP_HAS_DIAG_TRACE_WAY() 1 | ||
238 | |||
239 | /** Is the MEM_STRIPE_CONFIG SPR supported? */ | ||
240 | #define CHIP_HAS_MEM_STRIPE_CONFIG() 1 | ||
241 | |||
242 | /** Are the TLB_PERF SPRs supported? */ | ||
243 | #define CHIP_HAS_TLB_PERF() 1 | ||
244 | |||
245 | /** Is the VDN_SNOOP_SHIM_CTL SPR supported? */ | ||
246 | #define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 1 | ||
247 | |||
248 | /** Does the chip support rev1 DMA packets? */ | ||
249 | #define CHIP_HAS_REV1_DMA_PACKETS() 1 | ||
250 | |||
251 | #endif /* !__OPEN_SOURCE__ */ | ||
252 | #endif /* __ARCH_CHIP_H__ */ | ||
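
Most consumers use the CHIP_HAS_xxx() predicates for compile-time feature selection, so the same source builds for TILE64 (where the answer is often 0) and TILEPro. A hedged sketch of that pattern; inv_line() and finv_line() are hypothetical per-line helpers, not part of this header:

#include <arch/chip.h>

/* Drop a stride-aligned DMA receive buffer from the cache before handing
 * it to hardware.  On chips with a true "inv" instruction we invalidate
 * without writing dirty data back; otherwise fall back to flush-and-inv.
 */
static inline void drop_dma_buffer(void *addr, unsigned long size)
{
	char *p = addr;
	char *end = p + size;

#if CHIP_HAS_INV()
	for (; p < end; p += CHIP_INV_STRIDE())
		inv_line(p);
#else
	for (; p < end; p += CHIP_FINV_STRIDE())
		finv_line(p);
#endif
}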
diff --git a/arch/tile/include/arch/interrupts.h b/arch/tile/include/arch/interrupts.h
new file mode 100644
index 000000000000..20f8f07d2de9
--- /dev/null
+++ b/arch/tile/include/arch/interrupts.h
@@ -0,0 +1,19 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifdef __tilegx__ | ||
16 | #include <arch/interrupts_64.h> | ||
17 | #else | ||
18 | #include <arch/interrupts_32.h> | ||
19 | #endif | ||
diff --git a/arch/tile/include/arch/interrupts_32.h b/arch/tile/include/arch/interrupts_32.h
new file mode 100644
index 000000000000..feffada705f0
--- /dev/null
+++ b/arch/tile/include/arch/interrupts_32.h
@@ -0,0 +1,304 @@
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef __ARCH_INTERRUPTS_H__ | ||
16 | #define __ARCH_INTERRUPTS_H__ | ||
17 | |||
18 | /** Mask for an interrupt. */ | ||
19 | #ifdef __ASSEMBLER__ | ||
20 | /* Note: must handle breaking interrupts into high and low words manually. */ | ||
21 | #define INT_MASK(intno) (1 << (intno)) | ||
22 | #else | ||
23 | #define INT_MASK(intno) (1ULL << (intno)) | ||
24 | #endif | ||
25 | |||
26 | |||
27 | /** Where a given interrupt executes */ | ||
28 | #define INTERRUPT_VECTOR(i, pl) (0xFC000000 + ((pl) << 24) + ((i) << 8)) | ||
29 | |||
30 | /** Where to store a vector for a given interrupt. */ | ||
31 | #define USER_INTERRUPT_VECTOR(i) INTERRUPT_VECTOR(i, 0) | ||
32 | |||
33 | /** The base address of user-level interrupts. */ | ||
34 | #define USER_INTERRUPT_VECTOR_BASE INTERRUPT_VECTOR(0, 0) | ||
35 | |||
36 | |||
37 | /** Additional synthetic interrupt. */ | ||
38 | #define INT_BREAKPOINT (63) | ||
39 | |||
40 | #define INT_ITLB_MISS 0 | ||
41 | #define INT_MEM_ERROR 1 | ||
42 | #define INT_ILL 2 | ||
43 | #define INT_GPV 3 | ||
44 | #define INT_SN_ACCESS 4 | ||
45 | #define INT_IDN_ACCESS 5 | ||
46 | #define INT_UDN_ACCESS 6 | ||
47 | #define INT_IDN_REFILL 7 | ||
48 | #define INT_UDN_REFILL 8 | ||
49 | #define INT_IDN_COMPLETE 9 | ||
50 | #define INT_UDN_COMPLETE 10 | ||
51 | #define INT_SWINT_3 11 | ||
52 | #define INT_SWINT_2 12 | ||
53 | #define INT_SWINT_1 13 | ||
54 | #define INT_SWINT_0 14 | ||
55 | #define INT_UNALIGN_DATA 15 | ||
56 | #define INT_DTLB_MISS 16 | ||
57 | #define INT_DTLB_ACCESS 17 | ||
58 | #define INT_DMATLB_MISS 18 | ||
59 | #define INT_DMATLB_ACCESS 19 | ||
60 | #define INT_SNITLB_MISS 20 | ||
61 | #define INT_SN_NOTIFY 21 | ||
62 | #define INT_SN_FIREWALL 22 | ||
63 | #define INT_IDN_FIREWALL 23 | ||
64 | #define INT_UDN_FIREWALL 24 | ||
65 | #define INT_TILE_TIMER 25 | ||
66 | #define INT_IDN_TIMER 26 | ||
67 | #define INT_UDN_TIMER 27 | ||
68 | #define INT_DMA_NOTIFY 28 | ||
69 | #define INT_IDN_CA 29 | ||
70 | #define INT_UDN_CA 30 | ||
71 | #define INT_IDN_AVAIL 31 | ||
72 | #define INT_UDN_AVAIL 32 | ||
73 | #define INT_PERF_COUNT 33 | ||
74 | #define INT_INTCTRL_3 34 | ||
75 | #define INT_INTCTRL_2 35 | ||
76 | #define INT_INTCTRL_1 36 | ||
77 | #define INT_INTCTRL_0 37 | ||
78 | #define INT_BOOT_ACCESS 38 | ||
79 | #define INT_WORLD_ACCESS 39 | ||
80 | #define INT_I_ASID 40 | ||
81 | #define INT_D_ASID 41 | ||
82 | #define INT_DMA_ASID 42 | ||
83 | #define INT_SNI_ASID 43 | ||
84 | #define INT_DMA_CPL 44 | ||
85 | #define INT_SN_CPL 45 | ||
86 | #define INT_DOUBLE_FAULT 46 | ||
87 | #define INT_SN_STATIC_ACCESS 47 | ||
88 | #define INT_AUX_PERF_COUNT 48 | ||
89 | |||
90 | #define NUM_INTERRUPTS 49 | ||
91 | |||
92 | #define QUEUED_INTERRUPTS ( \ | ||
93 | INT_MASK(INT_MEM_ERROR) | \ | ||
94 | INT_MASK(INT_DMATLB_MISS) | \ | ||
95 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
96 | INT_MASK(INT_SNITLB_MISS) | \ | ||
97 | INT_MASK(INT_SN_NOTIFY) | \ | ||
98 | INT_MASK(INT_SN_FIREWALL) | \ | ||
99 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
100 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
101 | INT_MASK(INT_TILE_TIMER) | \ | ||
102 | INT_MASK(INT_IDN_TIMER) | \ | ||
103 | INT_MASK(INT_UDN_TIMER) | \ | ||
104 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
105 | INT_MASK(INT_IDN_CA) | \ | ||
106 | INT_MASK(INT_UDN_CA) | \ | ||
107 | INT_MASK(INT_IDN_AVAIL) | \ | ||
108 | INT_MASK(INT_UDN_AVAIL) | \ | ||
109 | INT_MASK(INT_PERF_COUNT) | \ | ||
110 | INT_MASK(INT_INTCTRL_3) | \ | ||
111 | INT_MASK(INT_INTCTRL_2) | \ | ||
112 | INT_MASK(INT_INTCTRL_1) | \ | ||
113 | INT_MASK(INT_INTCTRL_0) | \ | ||
114 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
115 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
116 | INT_MASK(INT_I_ASID) | \ | ||
117 | INT_MASK(INT_D_ASID) | \ | ||
118 | INT_MASK(INT_DMA_ASID) | \ | ||
119 | INT_MASK(INT_SNI_ASID) | \ | ||
120 | INT_MASK(INT_DMA_CPL) | \ | ||
121 | INT_MASK(INT_SN_CPL) | \ | ||
122 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
123 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
124 | 0) | ||
125 | #define NONQUEUED_INTERRUPTS ( \ | ||
126 | INT_MASK(INT_ITLB_MISS) | \ | ||
127 | INT_MASK(INT_ILL) | \ | ||
128 | INT_MASK(INT_GPV) | \ | ||
129 | INT_MASK(INT_SN_ACCESS) | \ | ||
130 | INT_MASK(INT_IDN_ACCESS) | \ | ||
131 | INT_MASK(INT_UDN_ACCESS) | \ | ||
132 | INT_MASK(INT_IDN_REFILL) | \ | ||
133 | INT_MASK(INT_UDN_REFILL) | \ | ||
134 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
135 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
136 | INT_MASK(INT_SWINT_3) | \ | ||
137 | INT_MASK(INT_SWINT_2) | \ | ||
138 | INT_MASK(INT_SWINT_1) | \ | ||
139 | INT_MASK(INT_SWINT_0) | \ | ||
140 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
141 | INT_MASK(INT_DTLB_MISS) | \ | ||
142 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
143 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
144 | 0) | ||
145 | #define CRITICAL_MASKED_INTERRUPTS ( \ | ||
146 | INT_MASK(INT_MEM_ERROR) | \ | ||
147 | INT_MASK(INT_DMATLB_MISS) | \ | ||
148 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
149 | INT_MASK(INT_SNITLB_MISS) | \ | ||
150 | INT_MASK(INT_SN_NOTIFY) | \ | ||
151 | INT_MASK(INT_SN_FIREWALL) | \ | ||
152 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
153 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
154 | INT_MASK(INT_TILE_TIMER) | \ | ||
155 | INT_MASK(INT_IDN_TIMER) | \ | ||
156 | INT_MASK(INT_UDN_TIMER) | \ | ||
157 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
158 | INT_MASK(INT_IDN_CA) | \ | ||
159 | INT_MASK(INT_UDN_CA) | \ | ||
160 | INT_MASK(INT_IDN_AVAIL) | \ | ||
161 | INT_MASK(INT_UDN_AVAIL) | \ | ||
162 | INT_MASK(INT_PERF_COUNT) | \ | ||
163 | INT_MASK(INT_INTCTRL_3) | \ | ||
164 | INT_MASK(INT_INTCTRL_2) | \ | ||
165 | INT_MASK(INT_INTCTRL_1) | \ | ||
166 | INT_MASK(INT_INTCTRL_0) | \ | ||
167 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
168 | 0) | ||
169 | #define CRITICAL_UNMASKED_INTERRUPTS ( \ | ||
170 | INT_MASK(INT_ITLB_MISS) | \ | ||
171 | INT_MASK(INT_ILL) | \ | ||
172 | INT_MASK(INT_GPV) | \ | ||
173 | INT_MASK(INT_SN_ACCESS) | \ | ||
174 | INT_MASK(INT_IDN_ACCESS) | \ | ||
175 | INT_MASK(INT_UDN_ACCESS) | \ | ||
176 | INT_MASK(INT_IDN_REFILL) | \ | ||
177 | INT_MASK(INT_UDN_REFILL) | \ | ||
178 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
179 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
180 | INT_MASK(INT_SWINT_3) | \ | ||
181 | INT_MASK(INT_SWINT_2) | \ | ||
182 | INT_MASK(INT_SWINT_1) | \ | ||
183 | INT_MASK(INT_SWINT_0) | \ | ||
184 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
185 | INT_MASK(INT_DTLB_MISS) | \ | ||
186 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
187 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
188 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
189 | INT_MASK(INT_I_ASID) | \ | ||
190 | INT_MASK(INT_D_ASID) | \ | ||
191 | INT_MASK(INT_DMA_ASID) | \ | ||
192 | INT_MASK(INT_SNI_ASID) | \ | ||
193 | INT_MASK(INT_DMA_CPL) | \ | ||
194 | INT_MASK(INT_SN_CPL) | \ | ||
195 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
196 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
197 | 0) | ||
198 | #define MASKABLE_INTERRUPTS ( \ | ||
199 | INT_MASK(INT_MEM_ERROR) | \ | ||
200 | INT_MASK(INT_IDN_REFILL) | \ | ||
201 | INT_MASK(INT_UDN_REFILL) | \ | ||
202 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
203 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
204 | INT_MASK(INT_DMATLB_MISS) | \ | ||
205 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
206 | INT_MASK(INT_SNITLB_MISS) | \ | ||
207 | INT_MASK(INT_SN_NOTIFY) | \ | ||
208 | INT_MASK(INT_SN_FIREWALL) | \ | ||
209 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
210 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
211 | INT_MASK(INT_TILE_TIMER) | \ | ||
212 | INT_MASK(INT_IDN_TIMER) | \ | ||
213 | INT_MASK(INT_UDN_TIMER) | \ | ||
214 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
215 | INT_MASK(INT_IDN_CA) | \ | ||
216 | INT_MASK(INT_UDN_CA) | \ | ||
217 | INT_MASK(INT_IDN_AVAIL) | \ | ||
218 | INT_MASK(INT_UDN_AVAIL) | \ | ||
219 | INT_MASK(INT_PERF_COUNT) | \ | ||
220 | INT_MASK(INT_INTCTRL_3) | \ | ||
221 | INT_MASK(INT_INTCTRL_2) | \ | ||
222 | INT_MASK(INT_INTCTRL_1) | \ | ||
223 | INT_MASK(INT_INTCTRL_0) | \ | ||
224 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
225 | 0) | ||
226 | #define UNMASKABLE_INTERRUPTS ( \ | ||
227 | INT_MASK(INT_ITLB_MISS) | \ | ||
228 | INT_MASK(INT_ILL) | \ | ||
229 | INT_MASK(INT_GPV) | \ | ||
230 | INT_MASK(INT_SN_ACCESS) | \ | ||
231 | INT_MASK(INT_IDN_ACCESS) | \ | ||
232 | INT_MASK(INT_UDN_ACCESS) | \ | ||
233 | INT_MASK(INT_SWINT_3) | \ | ||
234 | INT_MASK(INT_SWINT_2) | \ | ||
235 | INT_MASK(INT_SWINT_1) | \ | ||
236 | INT_MASK(INT_SWINT_0) | \ | ||
237 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
238 | INT_MASK(INT_DTLB_MISS) | \ | ||
239 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
240 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
241 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
242 | INT_MASK(INT_I_ASID) | \ | ||
243 | INT_MASK(INT_D_ASID) | \ | ||
244 | INT_MASK(INT_DMA_ASID) | \ | ||
245 | INT_MASK(INT_SNI_ASID) | \ | ||
246 | INT_MASK(INT_DMA_CPL) | \ | ||
247 | INT_MASK(INT_SN_CPL) | \ | ||
248 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
249 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
250 | 0) | ||
251 | #define SYNC_INTERRUPTS ( \ | ||
252 | INT_MASK(INT_ITLB_MISS) | \ | ||
253 | INT_MASK(INT_ILL) | \ | ||
254 | INT_MASK(INT_GPV) | \ | ||
255 | INT_MASK(INT_SN_ACCESS) | \ | ||
256 | INT_MASK(INT_IDN_ACCESS) | \ | ||
257 | INT_MASK(INT_UDN_ACCESS) | \ | ||
258 | INT_MASK(INT_IDN_REFILL) | \ | ||
259 | INT_MASK(INT_UDN_REFILL) | \ | ||
260 | INT_MASK(INT_IDN_COMPLETE) | \ | ||
261 | INT_MASK(INT_UDN_COMPLETE) | \ | ||
262 | INT_MASK(INT_SWINT_3) | \ | ||
263 | INT_MASK(INT_SWINT_2) | \ | ||
264 | INT_MASK(INT_SWINT_1) | \ | ||
265 | INT_MASK(INT_SWINT_0) | \ | ||
266 | INT_MASK(INT_UNALIGN_DATA) | \ | ||
267 | INT_MASK(INT_DTLB_MISS) | \ | ||
268 | INT_MASK(INT_DTLB_ACCESS) | \ | ||
269 | INT_MASK(INT_SN_STATIC_ACCESS) | \ | ||
270 | 0) | ||
271 | #define NON_SYNC_INTERRUPTS ( \ | ||
272 | INT_MASK(INT_MEM_ERROR) | \ | ||
273 | INT_MASK(INT_DMATLB_MISS) | \ | ||
274 | INT_MASK(INT_DMATLB_ACCESS) | \ | ||
275 | INT_MASK(INT_SNITLB_MISS) | \ | ||
276 | INT_MASK(INT_SN_NOTIFY) | \ | ||
277 | INT_MASK(INT_SN_FIREWALL) | \ | ||
278 | INT_MASK(INT_IDN_FIREWALL) | \ | ||
279 | INT_MASK(INT_UDN_FIREWALL) | \ | ||
280 | INT_MASK(INT_TILE_TIMER) | \ | ||
281 | INT_MASK(INT_IDN_TIMER) | \ | ||
282 | INT_MASK(INT_UDN_TIMER) | \ | ||
283 | INT_MASK(INT_DMA_NOTIFY) | \ | ||
284 | INT_MASK(INT_IDN_CA) | \ | ||
285 | INT_MASK(INT_UDN_CA) | \ | ||
286 | INT_MASK(INT_IDN_AVAIL) | \ | ||
287 | INT_MASK(INT_UDN_AVAIL) | \ | ||
288 | INT_MASK(INT_PERF_COUNT) | \ | ||
289 | INT_MASK(INT_INTCTRL_3) | \ | ||
290 | INT_MASK(INT_INTCTRL_2) | \ | ||
291 | INT_MASK(INT_INTCTRL_1) | \ | ||
292 | INT_MASK(INT_INTCTRL_0) | \ | ||
293 | INT_MASK(INT_BOOT_ACCESS) | \ | ||
294 | INT_MASK(INT_WORLD_ACCESS) | \ | ||
295 | INT_MASK(INT_I_ASID) | \ | ||
296 | INT_MASK(INT_D_ASID) | \ | ||
297 | INT_MASK(INT_DMA_ASID) | \ | ||
298 | INT_MASK(INT_SNI_ASID) | \ | ||
299 | INT_MASK(INT_DMA_CPL) | \ | ||
300 | INT_MASK(INT_SN_CPL) | \ | ||
301 | INT_MASK(INT_DOUBLE_FAULT) | \ | ||
302 | INT_MASK(INT_AUX_PERF_COUNT) | \ | ||
303 | 0) | ||
304 | #endif // !__ARCH_INTERRUPTS_H__ | ||
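
INT_MASK() and the grouped masks above are meant to be OR-ed together and written to the interrupt-mask SPRs; with NUM_INTERRUPTS at 49 the result does not fit in 32 bits, which is why the assembler note warns about splitting into high and low words (see CHIP_HAS_SPLIT_INTR_MASK()). A sketch of building and splitting such a mask; write_mask_lo() and write_mask_hi() are placeholders for the actual SPR writes:

#include <arch/interrupts.h>

/* Build a 64-bit mask of interrupts to block, then split it into the
 * low/high halves expected when the mask is held in two 32-bit SPRs.
 */
static void mask_timer_interrupts(void)
{
	unsigned long long mask = INT_MASK(INT_TILE_TIMER) |
				  INT_MASK(INT_IDN_TIMER) |
				  INT_MASK(INT_UDN_TIMER);

	write_mask_lo((unsigned int)(mask & 0xffffffffULL));
	write_mask_hi((unsigned int)(mask >> 32));
}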
diff --git a/arch/tile/include/arch/sim_def.h b/arch/tile/include/arch/sim_def.h
new file mode 100644
index 000000000000..6418fbde063e
--- /dev/null
+++ b/arch/tile/include/arch/sim_def.h
@@ -0,0 +1,512 @@
1 | // Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
2 | // | ||
3 | // This program is free software; you can redistribute it and/or | ||
4 | // modify it under the terms of the GNU General Public License | ||
5 | // as published by the Free Software Foundation, version 2. | ||
6 | // | ||
7 | // This program is distributed in the hope that it will be useful, but | ||
8 | // WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | // MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
10 | // NON INFRINGEMENT. See the GNU General Public License for | ||
11 | // more details. | ||
12 | |||
13 | //! @file | ||
14 | //! | ||
15 | //! Some low-level simulator definitions. | ||
16 | //! | ||
17 | |||
18 | #ifndef __ARCH_SIM_DEF_H__ | ||
19 | #define __ARCH_SIM_DEF_H__ | ||
20 | |||
21 | |||
22 | //! Internal: the low bits of the SIM_CONTROL_* SPR values specify | ||
23 | //! the operation to perform, and the remaining bits are | ||
24 | //! an operation-specific parameter (often unused). | ||
25 | //! | ||
26 | #define _SIM_CONTROL_OPERATOR_BITS 8 | ||
27 | |||
28 | |||
29 | //== Values which can be written to SPR_SIM_CONTROL. | ||
30 | |||
31 | //! If written to SPR_SIM_CONTROL, stops profiling. | ||
32 | //! | ||
33 | #define SIM_CONTROL_PROFILER_DISABLE 0 | ||
34 | |||
35 | //! If written to SPR_SIM_CONTROL, starts profiling. | ||
36 | //! | ||
37 | #define SIM_CONTROL_PROFILER_ENABLE 1 | ||
38 | |||
39 | //! If written to SPR_SIM_CONTROL, clears profiling counters. | ||
40 | //! | ||
41 | #define SIM_CONTROL_PROFILER_CLEAR 2 | ||
42 | |||
43 | //! If written to SPR_SIM_CONTROL, checkpoints the simulator. | ||
44 | //! | ||
45 | #define SIM_CONTROL_CHECKPOINT 3 | ||
46 | |||
47 | //! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8), | ||
48 | //! sets the tracing mask to the given mask. See "sim_set_tracing()". | ||
49 | //! | ||
50 | #define SIM_CONTROL_SET_TRACING 4 | ||
51 | |||
52 | //! If written to SPR_SIM_CONTROL, combined with a mask (shifted by 8), | ||
53 | //! dumps the requested items of machine state to the log. | ||
54 | //! | ||
55 | #define SIM_CONTROL_DUMP 5 | ||
56 | |||
57 | //! If written to SPR_SIM_CONTROL, clears chip-level profiling counters. | ||
58 | //! | ||
59 | #define SIM_CONTROL_PROFILER_CHIP_CLEAR 6 | ||
60 | |||
61 | //! If written to SPR_SIM_CONTROL, disables chip-level profiling. | ||
62 | //! | ||
63 | #define SIM_CONTROL_PROFILER_CHIP_DISABLE 7 | ||
64 | |||
65 | //! If written to SPR_SIM_CONTROL, enables chip-level profiling. | ||
66 | //! | ||
67 | #define SIM_CONTROL_PROFILER_CHIP_ENABLE 8 | ||
68 | |||
69 | //! If written to SPR_SIM_CONTROL, enables chip-level functional mode. | ||
70 | //! | ||
71 | #define SIM_CONTROL_ENABLE_FUNCTIONAL 9 | ||
72 | |||
73 | //! If written to SPR_SIM_CONTROL, disables chip-level functional mode. | ||
74 | //! | ||
75 | #define SIM_CONTROL_DISABLE_FUNCTIONAL 10 | ||
76 | |||
77 | //! If written to SPR_SIM_CONTROL, enables chip-level functional mode. | ||
78 | //! All tiles must perform this write for functional mode to be enabled. | ||
79 | //! Ignored in naked boot mode unless --functional is specified. | ||
80 | //! WARNING: Only the hypervisor startup code should use this! | ||
81 | //! | ||
82 | #define SIM_CONTROL_ENABLE_FUNCTIONAL_BARRIER 11 | ||
83 | |||
84 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
85 | //! writes a string directly to the simulator output. Written to once for | ||
86 | //! each character in the string, plus a final NUL. Instead of NUL, | ||
87 | //! you can also use "SIM_PUTC_FLUSH_STRING" or "SIM_PUTC_FLUSH_BINARY". | ||
88 | //! | ||
89 | // ISSUE: Document the meaning of "newline", and the handling of NUL. | ||
90 | // | ||
91 | #define SIM_CONTROL_PUTC 12 | ||
92 | |||
93 | //! If written to SPR_SIM_CONTROL, clears the --grind-coherence state for | ||
94 | //! this core. This is intended to be used before a loop that will | ||
95 | //! invalidate the cache by loading new data and evicting all current data. | ||
96 | //! Generally speaking, this API should only be used by system code. | ||
97 | //! | ||
98 | #define SIM_CONTROL_GRINDER_CLEAR 13 | ||
99 | |||
100 | //! If written to SPR_SIM_CONTROL, shuts down the simulator. | ||
101 | //! | ||
102 | #define SIM_CONTROL_SHUTDOWN 14 | ||
103 | |||
104 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
105 | //! indicates that a fork syscall just created the given process. | ||
106 | //! | ||
107 | #define SIM_CONTROL_OS_FORK 15 | ||
108 | |||
109 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
110 | //! indicates that an exit syscall was just executed by the given process. | ||
111 | //! | ||
112 | #define SIM_CONTROL_OS_EXIT 16 | ||
113 | |||
114 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
115 | //! indicates that the OS just switched to the given process. | ||
116 | //! | ||
117 | #define SIM_CONTROL_OS_SWITCH 17 | ||
118 | |||
119 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
120 | //! indicates that an exec syscall was just executed. Written to once for | ||
121 | //! each character in the executable name, plus a final NUL. | ||
122 | //! | ||
123 | #define SIM_CONTROL_OS_EXEC 18 | ||
124 | |||
125 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
126 | //! indicates that an interpreter (PT_INTERP) was loaded. Written to once | ||
127 | //! for each character in "ADDR:PATH", plus a final NUL, where "ADDR" is a | ||
128 | //! hex load address starting with "0x", and "PATH" is the executable name. | ||
129 | //! | ||
130 | #define SIM_CONTROL_OS_INTERP 19 | ||
131 | |||
132 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
133 | //! indicates that a dll was loaded. Written to once for each character | ||
134 | //! in "ADDR:PATH", plus a final NUL, where "ADDR" is a hexadecimal load | ||
135 | //! address starting with "0x", and "PATH" is the executable name. | ||
136 | //! | ||
137 | #define SIM_CONTROL_DLOPEN 20 | ||
138 | |||
139 | //! If written to SPR_SIM_CONTROL, combined with a character (shifted by 8), | ||
140 | //! indicates that a dll was unloaded. Written to once for each character | ||
141 | //! in "ADDR", plus a final NUL, where "ADDR" is a hexadecimal load | ||
142 | //! address starting with "0x". | ||
143 | //! | ||
144 | #define SIM_CONTROL_DLCLOSE 21 | ||
145 | |||
146 | //! If written to SPR_SIM_CONTROL, combined with a flag (shifted by 8), | ||
147 | //! indicates whether to allow data reads to remotely-cached | ||
148 | //! dirty cache lines to be cached locally without grinder warnings or | ||
149 | //! assertions (used by Linux kernel fast memcpy). | ||
150 | //! | ||
151 | #define SIM_CONTROL_ALLOW_MULTIPLE_CACHING 22 | ||
152 | |||
153 | //! If written to SPR_SIM_CONTROL, enables memory tracing. | ||
154 | //! | ||
155 | #define SIM_CONTROL_ENABLE_MEM_LOGGING 23 | ||
156 | |||
157 | //! If written to SPR_SIM_CONTROL, disables memory tracing. | ||
158 | //! | ||
159 | #define SIM_CONTROL_DISABLE_MEM_LOGGING 24 | ||
160 | |||
161 | //! If written to SPR_SIM_CONTROL, changes the shaping parameters of one of | ||
162 | //! the gbe or xgbe shims. Must specify the shim id, the type, the units, and | ||
163 | //! the rate, as defined in SIM_SHAPING_SPR_ARG. | ||
164 | //! | ||
165 | #define SIM_CONTROL_SHAPING 25 | ||
166 | |||
167 | //! If written to SPR_SIM_CONTROL, combined with character (shifted by 8), | ||
168 | //! requests that a simulator command be executed. Written to once for each | ||
169 | //! character in the command, plus a final NUL. | ||
170 | //! | ||
171 | #define SIM_CONTROL_COMMAND 26 | ||
172 | |||
173 | //! If written to SPR_SIM_CONTROL, indicates that the simulated system | ||
174 | //! is panicking, to allow debugging via --debug-on-panic. | ||
175 | //! | ||
176 | #define SIM_CONTROL_PANIC 27 | ||
177 | |||
178 | //! If written to SPR_SIM_CONTROL, triggers a simulator syscall. | ||
179 | //! See "sim_syscall()" for more info. | ||
180 | //! | ||
181 | #define SIM_CONTROL_SYSCALL 32 | ||
182 | |||
183 | //! If written to SPR_SIM_CONTROL, combined with a pid (shifted by 8), | ||
184 | //! provides the pid that subsequent SIM_CONTROL_OS_FORK writes should | ||
185 | //! use as the pid, rather than the default previous SIM_CONTROL_OS_SWITCH. | ||
186 | //! | ||
187 | #define SIM_CONTROL_OS_FORK_PARENT 33 | ||
188 | |||
189 | //! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number | ||
190 | //! (shifted by 8), clears the pending magic data section. The cleared | ||
191 | //! pending magic data section and any subsequently appended magic bytes | ||
192 | //! will only take effect when the classifier blast programmer is run. | ||
193 | #define SIM_CONTROL_CLEAR_MPIPE_MAGIC_BYTES 34 | ||
194 | |||
195 | //! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number | ||
196 | //! (shifted by 8) and a byte of data (shifted by 16), appends that byte | ||
197 | //! to the shim's pending magic data section. The pending magic data | ||
198 | //! section takes effect when the classifier blast programmer is run. | ||
199 | #define SIM_CONTROL_APPEND_MPIPE_MAGIC_BYTE 35 | ||
200 | |||
201 | //! If written to SPR_SIM_CONTROL, combined with a mPIPE shim number | ||
202 | //! (shifted by 8), an enable=1/disable=0 bit (shifted by 16), and a | ||
203 | //! mask of links (shifted by 32), enable or disable the corresponding | ||
204 | //! mPIPE links. | ||
205 | #define SIM_CONTROL_ENABLE_MPIPE_LINK_MAGIC_BYTE 36 | ||
206 | |||
207 | //== Syscall numbers for use with "sim_syscall()". | ||
208 | |||
209 | //! Syscall number for sim_add_watchpoint(). | ||
210 | //! | ||
211 | #define SIM_SYSCALL_ADD_WATCHPOINT 2 | ||
212 | |||
213 | //! Syscall number for sim_remove_watchpoint(). | ||
214 | //! | ||
215 | #define SIM_SYSCALL_REMOVE_WATCHPOINT 3 | ||
216 | |||
217 | //! Syscall number for sim_query_watchpoint(). | ||
218 | //! | ||
219 | #define SIM_SYSCALL_QUERY_WATCHPOINT 4 | ||
220 | |||
221 | //! Syscall number that asserts that the cache lines whose 64-bit PA | ||
222 | //! is passed as the second argument to sim_syscall(), and over a | ||
223 | //! range passed as the third argument, are no longer in cache. | ||
224 | //! The simulator raises an error if this is not the case. | ||
225 | //! | ||
226 | #define SIM_SYSCALL_VALIDATE_LINES_EVICTED 5 | ||
227 | |||
228 | |||
229 | //== Bit masks which can be shifted by 8, combined with | ||
230 | //== SIM_CONTROL_SET_TRACING, and written to SPR_SIM_CONTROL. | ||
231 | |||
232 | //! @addtogroup arch_sim | ||
233 | //! @{ | ||
234 | |||
235 | //! Enable --trace-cycle when passed to simulator_set_tracing(). | ||
236 | //! | ||
237 | #define SIM_TRACE_CYCLES 0x01 | ||
238 | |||
239 | //! Enable --trace-router when passed to simulator_set_tracing(). | ||
240 | //! | ||
241 | #define SIM_TRACE_ROUTER 0x02 | ||
242 | |||
243 | //! Enable --trace-register-writes when passed to simulator_set_tracing(). | ||
244 | //! | ||
245 | #define SIM_TRACE_REGISTER_WRITES 0x04 | ||
246 | |||
247 | //! Enable --trace-disasm when passed to simulator_set_tracing(). | ||
248 | //! | ||
249 | #define SIM_TRACE_DISASM 0x08 | ||
250 | |||
251 | //! Enable --trace-stall-info when passed to simulator_set_tracing(). | ||
252 | //! | ||
253 | #define SIM_TRACE_STALL_INFO 0x10 | ||
254 | |||
255 | //! Enable --trace-memory-controller when passed to simulator_set_tracing(). | ||
256 | //! | ||
257 | #define SIM_TRACE_MEMORY_CONTROLLER 0x20 | ||
258 | |||
259 | //! Enable --trace-l2 when passed to simulator_set_tracing(). | ||
260 | //! | ||
261 | #define SIM_TRACE_L2_CACHE 0x40 | ||
262 | |||
263 | //! Enable --trace-lines when passed to simulator_set_tracing(). | ||
264 | //! | ||
265 | #define SIM_TRACE_LINES 0x80 | ||
266 | |||
267 | //! Turn off all tracing when passed to simulator_set_tracing(). | ||
268 | //! | ||
269 | #define SIM_TRACE_NONE 0 | ||
270 | |||
271 | //! Turn on all tracing when passed to simulator_set_tracing(). | ||
272 | //! | ||
273 | #define SIM_TRACE_ALL (-1) | ||
274 | |||
275 | //! @} | ||
276 | |||
277 | //! Computes the value to write to SPR_SIM_CONTROL to set tracing flags. | ||
278 | //! | ||
279 | #define SIM_TRACE_SPR_ARG(mask) \ | ||
280 | (SIM_CONTROL_SET_TRACING | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
281 | |||
282 | |||
283 | //== Bit masks which can be shifted by 8, combined with | ||
284 | //== SIM_CONTROL_DUMP, and written to SPR_SIM_CONTROL. | ||
285 | |||
286 | //! @addtogroup arch_sim | ||
287 | //! @{ | ||
288 | |||
289 | //! Dump the general-purpose registers. | ||
290 | //! | ||
291 | #define SIM_DUMP_REGS 0x001 | ||
292 | |||
293 | //! Dump the SPRs. | ||
294 | //! | ||
295 | #define SIM_DUMP_SPRS 0x002 | ||
296 | |||
297 | //! Dump the ITLB. | ||
298 | //! | ||
299 | #define SIM_DUMP_ITLB 0x004 | ||
300 | |||
301 | //! Dump the DTLB. | ||
302 | //! | ||
303 | #define SIM_DUMP_DTLB 0x008 | ||
304 | |||
305 | //! Dump the L1 I-cache. | ||
306 | //! | ||
307 | #define SIM_DUMP_L1I 0x010 | ||
308 | |||
309 | //! Dump the L1 D-cache. | ||
310 | //! | ||
311 | #define SIM_DUMP_L1D 0x020 | ||
312 | |||
313 | //! Dump the L2 cache. | ||
314 | //! | ||
315 | #define SIM_DUMP_L2 0x040 | ||
316 | |||
317 | //! Dump the switch registers. | ||
318 | //! | ||
319 | #define SIM_DUMP_SNREGS 0x080 | ||
320 | |||
321 | //! Dump the switch ITLB. | ||
322 | //! | ||
323 | #define SIM_DUMP_SNITLB 0x100 | ||
324 | |||
325 | //! Dump the switch L1 I-cache. | ||
326 | //! | ||
327 | #define SIM_DUMP_SNL1I 0x200 | ||
328 | |||
329 | //! Dump the current backtrace. | ||
330 | //! | ||
331 | #define SIM_DUMP_BACKTRACE 0x400 | ||
332 | |||
333 | //! Only dump valid lines in caches. | ||
334 | //! | ||
335 | #define SIM_DUMP_VALID_LINES 0x800 | ||
336 | |||
337 | //! Dump everything that is dumpable. | ||
338 | //! | ||
339 | #define SIM_DUMP_ALL (-1 & ~SIM_DUMP_VALID_LINES) | ||
340 | |||
341 | // @} | ||
342 | |||
343 | //! Computes the value to write to SPR_SIM_CONTROL to dump machine state. | ||
344 | //! | ||
345 | #define SIM_DUMP_SPR_ARG(mask) \ | ||
346 | (SIM_CONTROL_DUMP | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
347 | |||
348 | |||
349 | //== Bit masks which can be shifted by 8, combined with | ||
350 | //== SIM_CONTROL_PROFILER_CHIP_xxx, and written to SPR_SIM_CONTROL. | ||
351 | |||
352 | //! @addtogroup arch_sim | ||
353 | //! @{ | ||
354 | |||
355 | //! Use with SIM_PROFILER_CHIP_xxx to control the memory controllers. | ||
356 | //! | ||
357 | #define SIM_CHIP_MEMCTL 0x001 | ||
358 | |||
359 | //! Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface. | ||
360 | //! | ||
361 | #define SIM_CHIP_XAUI 0x002 | ||
362 | |||
363 | //! Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface. | ||
364 | //! | ||
365 | #define SIM_CHIP_PCIE 0x004 | ||
366 | |||
367 | //! Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. | ||
368 | //! | ||
369 | #define SIM_CHIP_MPIPE 0x008 | ||
370 | |||
371 | //! Reference all chip devices. | ||
372 | //! | ||
373 | #define SIM_CHIP_ALL (-1) | ||
374 | |||
375 | //! @} | ||
376 | |||
377 | //! Computes the value to write to SPR_SIM_CONTROL to clear chip statistics. | ||
378 | //! | ||
379 | #define SIM_PROFILER_CHIP_CLEAR_SPR_ARG(mask) \ | ||
380 | (SIM_CONTROL_PROFILER_CHIP_CLEAR | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
381 | |||
382 | //! Computes the value to write to SPR_SIM_CONTROL to disable chip statistics. | ||
383 | //! | ||
384 | #define SIM_PROFILER_CHIP_DISABLE_SPR_ARG(mask) \ | ||
385 | (SIM_CONTROL_PROFILER_CHIP_DISABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
386 | |||
387 | //! Computes the value to write to SPR_SIM_CONTROL to enable chip statistics. | ||
388 | //! | ||
389 | #define SIM_PROFILER_CHIP_ENABLE_SPR_ARG(mask) \ | ||
390 | (SIM_CONTROL_PROFILER_CHIP_ENABLE | ((mask) << _SIM_CONTROL_OPERATOR_BITS)) | ||
391 | |||
392 | |||
393 | |||
394 | // Shim bitrate controls. | ||
395 | |||
396 | //! The number of bits used to store the shim id. | ||
397 | //! | ||
398 | #define SIM_CONTROL_SHAPING_SHIM_ID_BITS 3 | ||
399 | |||
400 | //! @addtogroup arch_sim | ||
401 | //! @{ | ||
402 | |||
403 | //! Change the gbe 0 bitrate. | ||
404 | //! | ||
405 | #define SIM_CONTROL_SHAPING_GBE_0 0x0 | ||
406 | |||
407 | //! Change the gbe 1 bitrate. | ||
408 | //! | ||
409 | #define SIM_CONTROL_SHAPING_GBE_1 0x1 | ||
410 | |||
411 | //! Change the gbe 2 bitrate. | ||
412 | //! | ||
413 | #define SIM_CONTROL_SHAPING_GBE_2 0x2 | ||
414 | |||
415 | //! Change the gbe 3 bitrate. | ||
416 | //! | ||
417 | #define SIM_CONTROL_SHAPING_GBE_3 0x3 | ||
418 | |||
419 | //! Change the xgbe 0 bitrate. | ||
420 | //! | ||
421 | #define SIM_CONTROL_SHAPING_XGBE_0 0x4 | ||
422 | |||
423 | //! Change the xgbe 1 bitrate. | ||
424 | //! | ||
425 | #define SIM_CONTROL_SHAPING_XGBE_1 0x5 | ||
426 | |||
427 | //! The number of bits for the type of shaping to do. | ||
428 | //! | ||
429 | #define SIM_CONTROL_SHAPING_TYPE_BITS 2 | ||
430 | |||
431 | //! Control the multiplier. | ||
432 | //! | ||
433 | #define SIM_CONTROL_SHAPING_MULTIPLIER 0 | ||
434 | |||
435 | //! Control the PPS. | ||
436 | //! | ||
437 | #define SIM_CONTROL_SHAPING_PPS 1 | ||
438 | |||
439 | //! Control the BPS. | ||
440 | //! | ||
441 | #define SIM_CONTROL_SHAPING_BPS 2 | ||
442 | |||
443 | //! The number of bits for the units for the shaping parameter. | ||
444 | //! | ||
445 | #define SIM_CONTROL_SHAPING_UNITS_BITS 2 | ||
446 | |||
447 | //! Provide a number in single units. | ||
448 | //! | ||
449 | #define SIM_CONTROL_SHAPING_UNITS_SINGLE 0 | ||
450 | |||
451 | //! Provide a number in kilo units. | ||
452 | //! | ||
453 | #define SIM_CONTROL_SHAPING_UNITS_KILO 1 | ||
454 | |||
455 | //! Provide a number in mega units. | ||
456 | //! | ||
457 | #define SIM_CONTROL_SHAPING_UNITS_MEGA 2 | ||
458 | |||
459 | //! Provide a number in giga units. | ||
460 | //! | ||
461 | #define SIM_CONTROL_SHAPING_UNITS_GIGA 3 | ||
462 | |||
463 | //! @} | ||
464 | |||
465 | //! How many bits are available for the rate. | ||
466 | //! | ||
467 | #define SIM_CONTROL_SHAPING_RATE_BITS \ | ||
468 | (32 - (_SIM_CONTROL_OPERATOR_BITS + \ | ||
469 | SIM_CONTROL_SHAPING_SHIM_ID_BITS + \ | ||
470 | SIM_CONTROL_SHAPING_TYPE_BITS + \ | ||
471 | SIM_CONTROL_SHAPING_UNITS_BITS)) | ||
472 | |||
473 | //! Computes the value to write to SPR_SIM_CONTROL to change a bitrate. | ||
474 | //! | ||
475 | #define SIM_SHAPING_SPR_ARG(shim, type, units, rate) \ | ||
476 | (SIM_CONTROL_SHAPING | \ | ||
477 | ((shim) | \ | ||
478 | ((type) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS)) | \ | ||
479 | ((units) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS + \ | ||
480 | SIM_CONTROL_SHAPING_TYPE_BITS)) | \ | ||
481 | ((rate) << (SIM_CONTROL_SHAPING_SHIM_ID_BITS + \ | ||
482 | SIM_CONTROL_SHAPING_TYPE_BITS + \ | ||
483 | SIM_CONTROL_SHAPING_UNITS_BITS))) << _SIM_CONTROL_OPERATOR_BITS) | ||
484 | |||
485 | |||
486 | //== Values returned when reading SPR_SIM_CONTROL. | ||
487 | // ISSUE: These names should share a longer common prefix. | ||
488 | |||
489 | //! When reading SPR_SIM_CONTROL, the mask of simulator tracing bits | ||
490 | //! (SIM_TRACE_xxx values). | ||
491 | //! | ||
492 | #define SIM_TRACE_FLAG_MASK 0xFFFF | ||
493 | |||
494 | //! When reading SPR_SIM_CONTROL, the mask for whether profiling is enabled. | ||
495 | //! | ||
496 | #define SIM_PROFILER_ENABLED_MASK 0x10000 | ||
497 | |||
498 | |||
499 | //== Special arguments for "SIM_CONTROL_PUTC". | ||
500 | |||
501 | //! Flag value for forcing a PUTC string-flush, including | ||
502 | //! coordinate/cycle prefix and newline. | ||
503 | //! | ||
504 | #define SIM_PUTC_FLUSH_STRING 0x100 | ||
505 | |||
506 | //! Flag value for forcing a PUTC binary-data-flush, which skips the | ||
507 | //! prefix and does not append a newline. | ||
508 | //! | ||
509 | #define SIM_PUTC_FLUSH_BINARY 0x101 | ||
510 | |||
511 | |||
512 | #endif //__ARCH_SIM_DEF_H__ | ||
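For context, a minimal sketch of how the dump controls above are typically consumed: the value produced by SIM_DUMP_SPR_ARG() is written to SPR_SIM_CONTROL (defined in <arch/spr_def.h> just below). This assumes the Tilera compiler's __insn_mtspr() intrinsic is available; the helper name sim_dump_caches() is hypothetical, and the write is only meaningful when running under the simulator.

#include <arch/sim_def.h>
#include <arch/spr_def.h>

/* Ask the simulator to dump the L1 D-cache and L2, valid lines only. */
static inline void sim_dump_caches(void)
{
        __insn_mtspr(SPR_SIM_CONTROL,
                     SIM_DUMP_SPR_ARG(SIM_DUMP_L1D | SIM_DUMP_L2 |
                                      SIM_DUMP_VALID_LINES));
}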
diff --git a/arch/tile/include/arch/spr_def.h b/arch/tile/include/arch/spr_def.h new file mode 100644 index 000000000000..c8fdbd9a45e6 --- /dev/null +++ b/arch/tile/include/arch/spr_def.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifdef __tilegx__ | ||
16 | #include <arch/spr_def_64.h> | ||
17 | #else | ||
18 | #include <arch/spr_def_32.h> | ||
19 | #endif | ||
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h new file mode 100644 index 000000000000..b4fc06864df6 --- /dev/null +++ b/arch/tile/include/arch/spr_def_32.h | |||
@@ -0,0 +1,162 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef __DOXYGEN__ | ||
16 | |||
17 | #ifndef __ARCH_SPR_DEF_H__ | ||
18 | #define __ARCH_SPR_DEF_H__ | ||
19 | |||
20 | #define SPR_AUX_PERF_COUNT_0 0x6005 | ||
21 | #define SPR_AUX_PERF_COUNT_1 0x6006 | ||
22 | #define SPR_AUX_PERF_COUNT_CTL 0x6007 | ||
23 | #define SPR_AUX_PERF_COUNT_STS 0x6008 | ||
24 | #define SPR_CYCLE_HIGH 0x4e06 | ||
25 | #define SPR_CYCLE_LOW 0x4e07 | ||
26 | #define SPR_DMA_BYTE 0x3900 | ||
27 | #define SPR_DMA_CHUNK_SIZE 0x3901 | ||
28 | #define SPR_DMA_CTR 0x3902 | ||
29 | #define SPR_DMA_CTR__REQUEST_MASK 0x1 | ||
30 | #define SPR_DMA_CTR__SUSPEND_MASK 0x2 | ||
31 | #define SPR_DMA_DST_ADDR 0x3903 | ||
32 | #define SPR_DMA_DST_CHUNK_ADDR 0x3904 | ||
33 | #define SPR_DMA_SRC_ADDR 0x3905 | ||
34 | #define SPR_DMA_SRC_CHUNK_ADDR 0x3906 | ||
35 | #define SPR_DMA_STATUS__DONE_MASK 0x1 | ||
36 | #define SPR_DMA_STATUS__BUSY_MASK 0x2 | ||
37 | #define SPR_DMA_STATUS__RUNNING_MASK 0x10 | ||
38 | #define SPR_DMA_STRIDE 0x3907 | ||
39 | #define SPR_DMA_USER_STATUS 0x3908 | ||
40 | #define SPR_DONE 0x4e08 | ||
41 | #define SPR_EVENT_BEGIN 0x4e0d | ||
42 | #define SPR_EVENT_END 0x4e0e | ||
43 | #define SPR_EX_CONTEXT_0_0 0x4a05 | ||
44 | #define SPR_EX_CONTEXT_0_1 0x4a06 | ||
45 | #define SPR_EX_CONTEXT_0_1__PL_SHIFT 0 | ||
46 | #define SPR_EX_CONTEXT_0_1__PL_RMASK 0x3 | ||
47 | #define SPR_EX_CONTEXT_0_1__PL_MASK 0x3 | ||
48 | #define SPR_EX_CONTEXT_0_1__ICS_SHIFT 2 | ||
49 | #define SPR_EX_CONTEXT_0_1__ICS_RMASK 0x1 | ||
50 | #define SPR_EX_CONTEXT_0_1__ICS_MASK 0x4 | ||
51 | #define SPR_EX_CONTEXT_1_0 0x4805 | ||
52 | #define SPR_EX_CONTEXT_1_1 0x4806 | ||
53 | #define SPR_EX_CONTEXT_1_1__PL_SHIFT 0 | ||
54 | #define SPR_EX_CONTEXT_1_1__PL_RMASK 0x3 | ||
55 | #define SPR_EX_CONTEXT_1_1__PL_MASK 0x3 | ||
56 | #define SPR_EX_CONTEXT_1_1__ICS_SHIFT 2 | ||
57 | #define SPR_EX_CONTEXT_1_1__ICS_RMASK 0x1 | ||
58 | #define SPR_EX_CONTEXT_1_1__ICS_MASK 0x4 | ||
59 | #define SPR_FAIL 0x4e09 | ||
60 | #define SPR_INTCTRL_0_STATUS 0x4a07 | ||
61 | #define SPR_INTCTRL_1_STATUS 0x4807 | ||
62 | #define SPR_INTERRUPT_CRITICAL_SECTION 0x4e0a | ||
63 | #define SPR_INTERRUPT_MASK_0_0 0x4a08 | ||
64 | #define SPR_INTERRUPT_MASK_0_1 0x4a09 | ||
65 | #define SPR_INTERRUPT_MASK_1_0 0x4809 | ||
66 | #define SPR_INTERRUPT_MASK_1_1 0x480a | ||
67 | #define SPR_INTERRUPT_MASK_RESET_0_0 0x4a0a | ||
68 | #define SPR_INTERRUPT_MASK_RESET_0_1 0x4a0b | ||
69 | #define SPR_INTERRUPT_MASK_RESET_1_0 0x480b | ||
70 | #define SPR_INTERRUPT_MASK_RESET_1_1 0x480c | ||
71 | #define SPR_INTERRUPT_MASK_SET_0_0 0x4a0c | ||
72 | #define SPR_INTERRUPT_MASK_SET_0_1 0x4a0d | ||
73 | #define SPR_INTERRUPT_MASK_SET_1_0 0x480d | ||
74 | #define SPR_INTERRUPT_MASK_SET_1_1 0x480e | ||
75 | #define SPR_MPL_DMA_CPL_SET_0 0x5800 | ||
76 | #define SPR_MPL_DMA_CPL_SET_1 0x5801 | ||
77 | #define SPR_MPL_DMA_NOTIFY_SET_0 0x3800 | ||
78 | #define SPR_MPL_DMA_NOTIFY_SET_1 0x3801 | ||
79 | #define SPR_MPL_INTCTRL_0_SET_0 0x4a00 | ||
80 | #define SPR_MPL_INTCTRL_0_SET_1 0x4a01 | ||
81 | #define SPR_MPL_INTCTRL_1_SET_0 0x4800 | ||
82 | #define SPR_MPL_INTCTRL_1_SET_1 0x4801 | ||
83 | #define SPR_MPL_SN_ACCESS_SET_0 0x0800 | ||
84 | #define SPR_MPL_SN_ACCESS_SET_1 0x0801 | ||
85 | #define SPR_MPL_SN_CPL_SET_0 0x5a00 | ||
86 | #define SPR_MPL_SN_CPL_SET_1 0x5a01 | ||
87 | #define SPR_MPL_SN_FIREWALL_SET_0 0x2c00 | ||
88 | #define SPR_MPL_SN_FIREWALL_SET_1 0x2c01 | ||
89 | #define SPR_MPL_SN_NOTIFY_SET_0 0x2a00 | ||
90 | #define SPR_MPL_SN_NOTIFY_SET_1 0x2a01 | ||
91 | #define SPR_MPL_UDN_ACCESS_SET_0 0x0c00 | ||
92 | #define SPR_MPL_UDN_ACCESS_SET_1 0x0c01 | ||
93 | #define SPR_MPL_UDN_AVAIL_SET_0 0x4000 | ||
94 | #define SPR_MPL_UDN_AVAIL_SET_1 0x4001 | ||
95 | #define SPR_MPL_UDN_CA_SET_0 0x3c00 | ||
96 | #define SPR_MPL_UDN_CA_SET_1 0x3c01 | ||
97 | #define SPR_MPL_UDN_COMPLETE_SET_0 0x1400 | ||
98 | #define SPR_MPL_UDN_COMPLETE_SET_1 0x1401 | ||
99 | #define SPR_MPL_UDN_FIREWALL_SET_0 0x3000 | ||
100 | #define SPR_MPL_UDN_FIREWALL_SET_1 0x3001 | ||
101 | #define SPR_MPL_UDN_REFILL_SET_0 0x1000 | ||
102 | #define SPR_MPL_UDN_REFILL_SET_1 0x1001 | ||
103 | #define SPR_MPL_UDN_TIMER_SET_0 0x3600 | ||
104 | #define SPR_MPL_UDN_TIMER_SET_1 0x3601 | ||
105 | #define SPR_MPL_WORLD_ACCESS_SET_0 0x4e00 | ||
106 | #define SPR_MPL_WORLD_ACCESS_SET_1 0x4e01 | ||
107 | #define SPR_PASS 0x4e0b | ||
108 | #define SPR_PERF_COUNT_0 0x4205 | ||
109 | #define SPR_PERF_COUNT_1 0x4206 | ||
110 | #define SPR_PERF_COUNT_CTL 0x4207 | ||
111 | #define SPR_PERF_COUNT_STS 0x4208 | ||
112 | #define SPR_PROC_STATUS 0x4f00 | ||
113 | #define SPR_SIM_CONTROL 0x4e0c | ||
114 | #define SPR_SNCTL 0x0805 | ||
115 | #define SPR_SNCTL__FRZFABRIC_MASK 0x1 | ||
116 | #define SPR_SNCTL__FRZPROC_MASK 0x2 | ||
117 | #define SPR_SNPC 0x080b | ||
118 | #define SPR_SNSTATIC 0x080c | ||
119 | #define SPR_SYSTEM_SAVE_0_0 0x4b00 | ||
120 | #define SPR_SYSTEM_SAVE_0_1 0x4b01 | ||
121 | #define SPR_SYSTEM_SAVE_0_2 0x4b02 | ||
122 | #define SPR_SYSTEM_SAVE_0_3 0x4b03 | ||
123 | #define SPR_SYSTEM_SAVE_1_0 0x4900 | ||
124 | #define SPR_SYSTEM_SAVE_1_1 0x4901 | ||
125 | #define SPR_SYSTEM_SAVE_1_2 0x4902 | ||
126 | #define SPR_SYSTEM_SAVE_1_3 0x4903 | ||
127 | #define SPR_TILE_COORD 0x4c17 | ||
128 | #define SPR_TILE_RTF_HWM 0x4e10 | ||
129 | #define SPR_TILE_TIMER_CONTROL 0x3205 | ||
130 | #define SPR_TILE_WRITE_PENDING 0x4e0f | ||
131 | #define SPR_UDN_AVAIL_EN 0x4005 | ||
132 | #define SPR_UDN_CA_DATA 0x0d00 | ||
133 | #define SPR_UDN_DATA_AVAIL 0x0d03 | ||
134 | #define SPR_UDN_DEADLOCK_TIMEOUT 0x3606 | ||
135 | #define SPR_UDN_DEMUX_CA_COUNT 0x0c05 | ||
136 | #define SPR_UDN_DEMUX_COUNT_0 0x0c06 | ||
137 | #define SPR_UDN_DEMUX_COUNT_1 0x0c07 | ||
138 | #define SPR_UDN_DEMUX_COUNT_2 0x0c08 | ||
139 | #define SPR_UDN_DEMUX_COUNT_3 0x0c09 | ||
140 | #define SPR_UDN_DEMUX_CTL 0x0c0a | ||
141 | #define SPR_UDN_DEMUX_QUEUE_SEL 0x0c0c | ||
142 | #define SPR_UDN_DEMUX_STATUS 0x0c0d | ||
143 | #define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e | ||
144 | #define SPR_UDN_DIRECTION_PROTECT 0x3005 | ||
145 | #define SPR_UDN_REFILL_EN 0x1005 | ||
146 | #define SPR_UDN_SP_FIFO_DATA 0x0c11 | ||
147 | #define SPR_UDN_SP_FIFO_SEL 0x0c12 | ||
148 | #define SPR_UDN_SP_FREEZE 0x0c13 | ||
149 | #define SPR_UDN_SP_FREEZE__SP_FRZ_MASK 0x1 | ||
150 | #define SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK 0x2 | ||
151 | #define SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK 0x4 | ||
152 | #define SPR_UDN_SP_STATE 0x0c14 | ||
153 | #define SPR_UDN_TAG_0 0x0c15 | ||
154 | #define SPR_UDN_TAG_1 0x0c16 | ||
155 | #define SPR_UDN_TAG_2 0x0c17 | ||
156 | #define SPR_UDN_TAG_3 0x0c18 | ||
157 | #define SPR_UDN_TAG_VALID 0x0c19 | ||
158 | #define SPR_UDN_TILE_COORD 0x0c1a | ||
159 | |||
160 | #endif /* !defined(__ARCH_SPR_DEF_H__) */ | ||
161 | |||
162 | #endif /* !defined(__DOXYGEN__) */ | ||
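A brief illustration of the xxx__FIELD_SHIFT / xxx__FIELD_RMASK convention used above: the RMASK applies after right-shifting by the SHIFT, while the plain MASK applies in place. The helper decode_ex_context() is hypothetical; only the macro names come from this header.

#include <arch/spr_def.h>

/* Pull the protection level and ICS bit out of an EX_CONTEXT_1_1 value. */
static inline void decode_ex_context(unsigned long ex1, int *pl, int *ics)
{
        *pl  = (ex1 >> SPR_EX_CONTEXT_1_1__PL_SHIFT) &
               SPR_EX_CONTEXT_1_1__PL_RMASK;
        *ics = (ex1 >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) &
               SPR_EX_CONTEXT_1_1__ICS_RMASK;
}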
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild new file mode 100644 index 000000000000..3b8f55b82dee --- /dev/null +++ b/arch/tile/include/asm/Kbuild | |||
@@ -0,0 +1,3 @@ | |||
1 | include include/asm-generic/Kbuild.asm | ||
2 | |||
3 | header-y += ucontext.h | ||
diff --git a/arch/tile/include/asm/asm-offsets.h b/arch/tile/include/asm/asm-offsets.h new file mode 100644 index 000000000000..d370ee36a182 --- /dev/null +++ b/arch/tile/include/asm/asm-offsets.h | |||
@@ -0,0 +1 @@ | |||
#include <generated/asm-offsets.h> | |||
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h new file mode 100644 index 000000000000..b8c49f98a44c --- /dev/null +++ b/arch/tile/include/asm/atomic.h | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Atomic primitives. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_ATOMIC_H | ||
18 | #define _ASM_TILE_ATOMIC_H | ||
19 | |||
20 | #ifndef __ASSEMBLY__ | ||
21 | |||
22 | #include <linux/compiler.h> | ||
23 | #include <asm/system.h> | ||
24 | |||
25 | #define ATOMIC_INIT(i) { (i) } | ||
26 | |||
27 | /** | ||
28 | * atomic_read - read atomic variable | ||
29 | * @v: pointer of type atomic_t | ||
30 | * | ||
31 | * Atomically reads the value of @v. | ||
32 | */ | ||
33 | static inline int atomic_read(const atomic_t *v) | ||
34 | { | ||
35 | return v->counter; | ||
36 | } | ||
37 | |||
38 | /** | ||
39 | * atomic_sub_return - subtract integer and return | ||
40 | * @v: pointer of type atomic_t | ||
41 | * @i: integer value to subtract | ||
42 | * | ||
43 | * Atomically subtracts @i from @v and returns @v - @i | ||
44 | */ | ||
45 | #define atomic_sub_return(i, v) atomic_add_return((int)(-(i)), (v)) | ||
46 | |||
47 | /** | ||
48 | * atomic_sub - subtract integer from atomic variable | ||
49 | * @i: integer value to subtract | ||
50 | * @v: pointer of type atomic_t | ||
51 | * | ||
52 | * Atomically subtracts @i from @v. | ||
53 | */ | ||
54 | #define atomic_sub(i, v) atomic_add((int)(-(i)), (v)) | ||
55 | |||
56 | /** | ||
57 | * atomic_sub_and_test - subtract value from variable and test result | ||
58 | * @i: integer value to subtract | ||
59 | * @v: pointer of type atomic_t | ||
60 | * | ||
61 | * Atomically subtracts @i from @v and returns true if the result is | ||
62 | * zero, or false for all other cases. | ||
63 | */ | ||
64 | #define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) | ||
65 | |||
66 | /** | ||
67 | * atomic_inc_return - increment memory and return | ||
68 | * @v: pointer of type atomic_t | ||
69 | * | ||
70 | * Atomically increments @v by 1 and returns the new value. | ||
71 | */ | ||
72 | #define atomic_inc_return(v) atomic_add_return(1, (v)) | ||
73 | |||
74 | /** | ||
75 | * atomic_dec_return - decrement memory and return | ||
76 | * @v: pointer of type atomic_t | ||
77 | * | ||
78 | * Atomically decrements @v by 1 and returns the new value. | ||
79 | */ | ||
80 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) | ||
81 | |||
82 | /** | ||
83 | * atomic_inc - increment atomic variable | ||
84 | * @v: pointer of type atomic_t | ||
85 | * | ||
86 | * Atomically increments @v by 1. | ||
87 | */ | ||
88 | #define atomic_inc(v) atomic_add(1, (v)) | ||
89 | |||
90 | /** | ||
91 | * atomic_dec - decrement atomic variable | ||
92 | * @v: pointer of type atomic_t | ||
93 | * | ||
94 | * Atomically decrements @v by 1. | ||
95 | */ | ||
96 | #define atomic_dec(v) atomic_sub(1, (v)) | ||
97 | |||
98 | /** | ||
99 | * atomic_dec_and_test - decrement and test | ||
100 | * @v: pointer of type atomic_t | ||
101 | * | ||
102 | * Atomically decrements @v by 1 and returns true if the result is 0. | ||
103 | */ | ||
104 | #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) | ||
105 | |||
106 | /** | ||
107 | * atomic_inc_and_test - increment and test | ||
108 | * @v: pointer of type atomic_t | ||
109 | * | ||
110 | * Atomically increments @v by 1 and returns true if the result is 0. | ||
111 | */ | ||
112 | #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) | ||
113 | |||
114 | /** | ||
115 | * atomic_add_negative - add and test if negative | ||
116 | * @v: pointer of type atomic_t | ||
117 | * @i: integer value to add | ||
118 | * | ||
119 | * Atomically adds @i to @v and returns true if the result is | ||
120 | * negative, or false when result is greater than or equal to zero. | ||
121 | */ | ||
122 | #define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0) | ||
123 | |||
124 | /** | ||
125 | * atomic_inc_not_zero - increment unless the number is zero | ||
126 | * @v: pointer of type atomic_t | ||
127 | * | ||
128 | * Atomically increments @v by 1, so long as @v is non-zero. | ||
129 | * Returns non-zero if @v was non-zero, and zero otherwise. | ||
130 | */ | ||
131 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
132 | |||
133 | |||
134 | /* | ||
135 | * We define xchg() and cmpxchg() in the included headers. | ||
136 | * Note that we do not define __HAVE_ARCH_CMPXCHG, since that would imply | ||
137 | * that cmpxchg() is an efficient operation, which is not particularly true. | ||
138 | */ | ||
139 | |||
140 | /* Nonexistent functions intended to cause link errors. */ | ||
141 | extern unsigned long __xchg_called_with_bad_pointer(void); | ||
142 | extern unsigned long __cmpxchg_called_with_bad_pointer(void); | ||
143 | |||
144 | #define tas(ptr) (xchg((ptr), 1)) | ||
145 | |||
146 | #endif /* __ASSEMBLY__ */ | ||
147 | |||
148 | #ifndef __tilegx__ | ||
149 | #include <asm/atomic_32.h> | ||
150 | #else | ||
151 | #include <asm/atomic_64.h> | ||
152 | #endif | ||
153 | |||
154 | /* Provide the appropriate atomic_long_t definitions. */ | ||
155 | #ifndef __ASSEMBLY__ | ||
156 | #include <asm-generic/atomic-long.h> | ||
157 | #endif | ||
158 | |||
159 | #endif /* _ASM_TILE_ATOMIC_H */ | ||
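As a usage sketch only, here is the reference-counting idiom that wrappers such as atomic_inc_not_zero() and atomic_dec_and_test() exist to support; struct obj, obj_get() and obj_put() are hypothetical names, not part of this header.

#include <asm/atomic.h>

struct obj {
        atomic_t refcount;      /* starts at 1 when the object is created */
};

/* Take a reference only if the object is still live (count non-zero). */
static inline int obj_get(struct obj *o)
{
        return atomic_inc_not_zero(&o->refcount);
}

/* Drop a reference; returns true when the caller should free the object. */
static inline int obj_put(struct obj *o)
{
        return atomic_dec_and_test(&o->refcount);
}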
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h new file mode 100644 index 000000000000..e4f8b4f04895 --- /dev/null +++ b/arch/tile/include/asm/atomic_32.h | |||
@@ -0,0 +1,353 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Do not include directly; use <asm/atomic.h>. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_ATOMIC_32_H | ||
18 | #define _ASM_TILE_ATOMIC_32_H | ||
19 | |||
20 | #include <arch/chip.h> | ||
21 | |||
22 | #ifndef __ASSEMBLY__ | ||
23 | |||
24 | /* Tile-specific routines to support <asm/atomic.h>. */ | ||
25 | int _atomic_xchg(atomic_t *v, int n); | ||
26 | int _atomic_xchg_add(atomic_t *v, int i); | ||
27 | int _atomic_xchg_add_unless(atomic_t *v, int a, int u); | ||
28 | int _atomic_cmpxchg(atomic_t *v, int o, int n); | ||
29 | |||
30 | /** | ||
31 | * atomic_xchg - atomically exchange contents of memory with a new value | ||
32 | * @v: pointer of type atomic_t | ||
33 | * @i: integer value to store in memory | ||
34 | * | ||
35 | * Atomically sets @v to @i and returns old @v | ||
36 | */ | ||
37 | static inline int atomic_xchg(atomic_t *v, int n) | ||
38 | { | ||
39 | smp_mb(); /* barrier for proper semantics */ | ||
40 | return _atomic_xchg(v, n); | ||
41 | } | ||
42 | |||
43 | /** | ||
44 | * atomic_cmpxchg - atomically exchange contents of memory if it matches | ||
45 | * @v: pointer of type atomic_t | ||
46 | * @o: old value that memory should have | ||
47 | * @n: new value to write to memory if it matches | ||
48 | * | ||
49 | * Atomically checks if @v holds @o and replaces it with @n if so. | ||
50 | * Returns the old value at @v. | ||
51 | */ | ||
52 | static inline int atomic_cmpxchg(atomic_t *v, int o, int n) | ||
53 | { | ||
54 | smp_mb(); /* barrier for proper semantics */ | ||
55 | return _atomic_cmpxchg(v, o, n); | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * atomic_add - add integer to atomic variable | ||
60 | * @i: integer value to add | ||
61 | * @v: pointer of type atomic_t | ||
62 | * | ||
63 | * Atomically adds @i to @v. | ||
64 | */ | ||
65 | static inline void atomic_add(int i, atomic_t *v) | ||
66 | { | ||
67 | _atomic_xchg_add(v, i); | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * atomic_add_return - add integer and return | ||
72 | * @v: pointer of type atomic_t | ||
73 | * @i: integer value to add | ||
74 | * | ||
75 | * Atomically adds @i to @v and returns @i + @v | ||
76 | */ | ||
77 | static inline int atomic_add_return(int i, atomic_t *v) | ||
78 | { | ||
79 | smp_mb(); /* barrier for proper semantics */ | ||
80 | return _atomic_xchg_add(v, i) + i; | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * atomic_add_unless - add unless the number is already a given value | ||
85 | * @v: pointer of type atomic_t | ||
86 | * @a: the amount to add to v... | ||
87 | * @u: ...unless v is equal to u. | ||
88 | * | ||
89 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
90 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
91 | */ | ||
92 | static inline int atomic_add_unless(atomic_t *v, int a, int u) | ||
93 | { | ||
94 | smp_mb(); /* barrier for proper semantics */ | ||
95 | return _atomic_xchg_add_unless(v, a, u) != u; | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * atomic_set - set atomic variable | ||
100 | * @v: pointer of type atomic_t | ||
101 | * @i: required value | ||
102 | * | ||
103 | * Atomically sets the value of @v to @i. | ||
104 | * | ||
105 | * atomic_set() can't be just a raw store, since it would be lost if it | ||
106 | * fell between the load and store of one of the other atomic ops. | ||
107 | */ | ||
108 | static inline void atomic_set(atomic_t *v, int n) | ||
109 | { | ||
110 | _atomic_xchg(v, n); | ||
111 | } | ||
112 | |||
113 | #define xchg(ptr, x) ((typeof(*(ptr))) \ | ||
114 | ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \ | ||
115 | atomic_xchg((atomic_t *)(ptr), (long)(x)) : \ | ||
116 | __xchg_called_with_bad_pointer())) | ||
117 | |||
118 | #define cmpxchg(ptr, o, n) ((typeof(*(ptr))) \ | ||
119 | ((sizeof(*(ptr)) == sizeof(atomic_t)) ? \ | ||
120 | atomic_cmpxchg((atomic_t *)(ptr), (long)(o), (long)(n)) : \ | ||
121 | __cmpxchg_called_with_bad_pointer())) | ||
122 | |||
123 | /* A 64bit atomic type */ | ||
124 | |||
125 | typedef struct { | ||
126 | u64 __aligned(8) counter; | ||
127 | } atomic64_t; | ||
128 | |||
129 | #define ATOMIC64_INIT(val) { (val) } | ||
130 | |||
131 | u64 _atomic64_xchg(atomic64_t *v, u64 n); | ||
132 | u64 _atomic64_xchg_add(atomic64_t *v, u64 i); | ||
133 | u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u); | ||
134 | u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n); | ||
135 | |||
136 | /** | ||
137 | * atomic64_read - read atomic variable | ||
138 | * @v: pointer of type atomic64_t | ||
139 | * | ||
140 | * Atomically reads the value of @v. | ||
141 | */ | ||
142 | static inline u64 atomic64_read(const atomic64_t *v) | ||
143 | { | ||
144 | /* | ||
145 | * Requires an atomic op to read both 32-bit parts consistently. | ||
146 | * Casting away const is safe since the atomic support routines | ||
147 | * do not write to memory if the value has not been modified. | ||
148 | */ | ||
149 | return _atomic64_xchg_add((atomic64_t *)v, 0); | ||
150 | } | ||
151 | |||
152 | /** | ||
153 | * atomic64_xchg - atomically exchange contents of memory with a new value | ||
154 | * @v: pointer of type atomic64_t | ||
155 | * @i: integer value to store in memory | ||
156 | * | ||
157 | * Atomically sets @v to @i and returns old @v | ||
158 | */ | ||
159 | static inline u64 atomic64_xchg(atomic64_t *v, u64 n) | ||
160 | { | ||
161 | smp_mb(); /* barrier for proper semantics */ | ||
162 | return _atomic64_xchg(v, n); | ||
163 | } | ||
164 | |||
165 | /** | ||
166 | * atomic64_cmpxchg - atomically exchange contents of memory if it matches | ||
167 | * @v: pointer of type atomic64_t | ||
168 | * @o: old value that memory should have | ||
169 | * @n: new value to write to memory if it matches | ||
170 | * | ||
171 | * Atomically checks if @v holds @o and replaces it with @n if so. | ||
172 | * Returns the old value at @v. | ||
173 | */ | ||
174 | static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) | ||
175 | { | ||
176 | smp_mb(); /* barrier for proper semantics */ | ||
177 | return _atomic64_cmpxchg(v, o, n); | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * atomic64_add - add integer to atomic variable | ||
182 | * @i: integer value to add | ||
183 | * @v: pointer of type atomic64_t | ||
184 | * | ||
185 | * Atomically adds @i to @v. | ||
186 | */ | ||
187 | static inline void atomic64_add(u64 i, atomic64_t *v) | ||
188 | { | ||
189 | _atomic64_xchg_add(v, i); | ||
190 | } | ||
191 | |||
192 | /** | ||
193 | * atomic64_add_return - add integer and return | ||
194 | * @v: pointer of type atomic64_t | ||
195 | * @i: integer value to add | ||
196 | * | ||
197 | * Atomically adds @i to @v and returns @i + @v | ||
198 | */ | ||
199 | static inline u64 atomic64_add_return(u64 i, atomic64_t *v) | ||
200 | { | ||
201 | smp_mb(); /* barrier for proper semantics */ | ||
202 | return _atomic64_xchg_add(v, i) + i; | ||
203 | } | ||
204 | |||
205 | /** | ||
206 | * atomic64_add_unless - add unless the number is already a given value | ||
207 | * @v: pointer of type atomic64_t | ||
208 | * @a: the amount to add to v... | ||
209 | * @u: ...unless v is equal to u. | ||
210 | * | ||
211 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
212 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
213 | */ | ||
214 | static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) | ||
215 | { | ||
216 | smp_mb(); /* barrier for proper semantics */ | ||
217 | return _atomic64_xchg_add_unless(v, a, u) != u; | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * atomic64_set - set atomic variable | ||
222 | * @v: pointer of type atomic64_t | ||
223 | * @i: required value | ||
224 | * | ||
225 | * Atomically sets the value of @v to @i. | ||
226 | * | ||
227 | * atomic64_set() can't be just a raw store, since it would be lost if it | ||
228 | * fell between the load and store of one of the other atomic ops. | ||
229 | */ | ||
230 | static inline void atomic64_set(atomic64_t *v, u64 n) | ||
231 | { | ||
232 | _atomic64_xchg(v, n); | ||
233 | } | ||
234 | |||
235 | #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) | ||
236 | #define atomic64_inc(v) atomic64_add(1LL, (v)) | ||
237 | #define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) | ||
238 | #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) | ||
239 | #define atomic64_sub_return(i, v) atomic64_add_return(-(i), (v)) | ||
240 | #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) | ||
241 | #define atomic64_sub(i, v) atomic64_add(-(i), (v)) | ||
242 | #define atomic64_dec(v) atomic64_sub(1LL, (v)) | ||
243 | #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) | ||
244 | #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) | ||
245 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) | ||
246 | |||
247 | /* | ||
248 | * We need a barrier before modifying the word, since the _atomic_xxx() | ||
249 | * routines just tns the lock and then do a read/modify/write of the word. | ||
250 | * But after the word is updated, the routine issues an "mf" before returning, | ||
251 | * and since it's a function call, we don't even need a compiler barrier. | ||
252 | */ | ||
253 | #define smp_mb__before_atomic_dec() smp_mb() | ||
254 | #define smp_mb__before_atomic_inc() smp_mb() | ||
255 | #define smp_mb__after_atomic_dec() do { } while (0) | ||
256 | #define smp_mb__after_atomic_inc() do { } while (0) | ||
257 | |||
258 | |||
259 | /* | ||
260 | * Support "tns" atomic integers. These are atomic integers that can | ||
261 | * hold any value but "1". They are more efficient than regular atomic | ||
262 | * operations because the "lock" (aka acquire) step is a single "tns" | ||
263 | * in the uncontended case, and the "unlock" (aka release) step is a | ||
264 | * single "store" without an mf. (However, note that on tilepro the | ||
265 | * "tns" will evict the local cache line, so it's not all upside.) | ||
266 | * | ||
267 | * Note that you can ONLY observe the value stored in the pointer | ||
268 | * using these operations; a direct read of the value may confusingly | ||
269 | * return the special value "1". | ||
270 | */ | ||
271 | |||
272 | int __tns_atomic_acquire(atomic_t *); | ||
273 | void __tns_atomic_release(atomic_t *p, int v); | ||
274 | |||
275 | static inline void tns_atomic_set(atomic_t *v, int i) | ||
276 | { | ||
277 | __tns_atomic_acquire(v); | ||
278 | __tns_atomic_release(v, i); | ||
279 | } | ||
280 | |||
281 | static inline int tns_atomic_cmpxchg(atomic_t *v, int o, int n) | ||
282 | { | ||
283 | int ret = __tns_atomic_acquire(v); | ||
284 | __tns_atomic_release(v, (ret == o) ? n : ret); | ||
285 | return ret; | ||
286 | } | ||
287 | |||
288 | static inline int tns_atomic_xchg(atomic_t *v, int n) | ||
289 | { | ||
290 | int ret = __tns_atomic_acquire(v); | ||
291 | __tns_atomic_release(v, n); | ||
292 | return ret; | ||
293 | } | ||
294 | |||
295 | #endif /* !__ASSEMBLY__ */ | ||
296 | |||
297 | /* | ||
298 | * Internal definitions only beyond this point. | ||
299 | */ | ||
300 | |||
301 | #define ATOMIC_LOCKS_FOUND_VIA_TABLE() \ | ||
302 | (!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP)) | ||
303 | |||
304 | #if ATOMIC_LOCKS_FOUND_VIA_TABLE() | ||
305 | |||
306 | /* Number of entries in atomic_lock_ptr[]. */ | ||
307 | #define ATOMIC_HASH_L1_SHIFT 6 | ||
308 | #define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT) | ||
309 | |||
310 | /* Number of locks in each struct pointed to by atomic_lock_ptr[]. */ | ||
311 | #define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2) | ||
312 | #define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT) | ||
313 | |||
314 | #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
315 | |||
316 | /* | ||
317 | * Number of atomic locks in atomic_locks[]. Must be a power of two. | ||
318 | * There is no reason for more than PAGE_SIZE / 8 entries, since that | ||
319 | * is the maximum number of pointer bits we can use to index this. | ||
320 | * And we cannot have more than PAGE_SIZE / 4, since this has to | ||
321 | * fit on a single page and each entry takes 4 bytes. | ||
322 | */ | ||
323 | #define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3) | ||
324 | #define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT) | ||
325 | |||
326 | #ifndef __ASSEMBLY__ | ||
327 | extern int atomic_locks[]; | ||
328 | #endif | ||
329 | |||
330 | #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ | ||
331 | |||
332 | /* | ||
333 | * All the code that may fault while holding an atomic lock must | ||
334 | * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code | ||
335 | * can correctly release and reacquire the lock. Note that we | ||
336 | * mention the register number in a comment in "lib/atomic_asm.S" to help | ||
337 | * keep assembly coders from using this register by mistake, so if it | ||
338 | * is changed here, change that comment as well. | ||
339 | */ | ||
340 | #define ATOMIC_LOCK_REG 20 | ||
341 | #define ATOMIC_LOCK_REG_NAME r20 | ||
342 | |||
343 | #ifndef __ASSEMBLY__ | ||
344 | /* Called from setup to initialize a hash table to point to per_cpu locks. */ | ||
345 | void __init_atomic_per_cpu(void); | ||
346 | |||
347 | #ifdef CONFIG_SMP | ||
348 | /* Support releasing the atomic lock in do_page_fault_ics(). */ | ||
349 | void __atomic_fault_unlock(int *lock_ptr); | ||
350 | #endif | ||
351 | #endif /* !__ASSEMBLY__ */ | ||
352 | |||
353 | #endif /* _ASM_TILE_ATOMIC_32_H */ | ||
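To make the "tns" caveat above concrete, a small hedged sketch: the word may hold any value except 1, and it should only be observed through these helpers. The tns_flag variable and tns_flag_example() are hypothetical.

#include <asm/atomic.h>

static atomic_t tns_flag = ATOMIC_INIT(0);      /* must never be set to 1 */

static inline void tns_flag_example(void)
{
        int old;

        tns_atomic_set(&tns_flag, 2);                   /* any value but 1 */
        old = tns_atomic_cmpxchg(&tns_flag, 2, 3);      /* old == 2, now 3 */
        old = tns_atomic_xchg(&tns_flag, 0);            /* old == 3, now 0 */
        (void)old;
}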
diff --git a/arch/tile/include/asm/auxvec.h b/arch/tile/include/asm/auxvec.h new file mode 100644 index 000000000000..1d393edb0641 --- /dev/null +++ b/arch/tile/include/asm/auxvec.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_AUXVEC_H | ||
16 | #define _ASM_TILE_AUXVEC_H | ||
17 | |||
18 | /* No extensions to auxvec */ | ||
19 | |||
20 | #endif /* _ASM_TILE_AUXVEC_H */ | ||
diff --git a/arch/tile/include/asm/backtrace.h b/arch/tile/include/asm/backtrace.h new file mode 100644 index 000000000000..6970bfcad549 --- /dev/null +++ b/arch/tile/include/asm/backtrace.h | |||
@@ -0,0 +1,193 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _TILE_BACKTRACE_H | ||
16 | #define _TILE_BACKTRACE_H | ||
17 | |||
18 | |||
19 | |||
20 | #include <linux/types.h> | ||
21 | |||
22 | #include <arch/chip.h> | ||
23 | |||
24 | #if CHIP_VA_WIDTH() > 32 | ||
25 | typedef unsigned long long VirtualAddress; | ||
26 | #else | ||
27 | typedef unsigned int VirtualAddress; | ||
28 | #endif | ||
29 | |||
30 | |||
31 | /** Reads 'size' bytes from 'address' and writes the data to 'result'. | ||
32 | * Returns true if successful, else false (e.g. memory not readable). | ||
33 | */ | ||
34 | typedef bool (*BacktraceMemoryReader)(void *result, | ||
35 | VirtualAddress address, | ||
36 | unsigned int size, | ||
37 | void *extra); | ||
38 | |||
39 | typedef struct { | ||
40 | /** Current PC. */ | ||
41 | VirtualAddress pc; | ||
42 | |||
43 | /** Current stack pointer value. */ | ||
44 | VirtualAddress sp; | ||
45 | |||
46 | /** Current frame pointer value (i.e. caller's stack pointer). */ | ||
47 | VirtualAddress fp; | ||
48 | |||
49 | /** Internal use only: caller's PC for first frame. */ | ||
50 | VirtualAddress initial_frame_caller_pc; | ||
51 | |||
52 | /** Internal use only: callback to read memory. */ | ||
53 | BacktraceMemoryReader read_memory_func; | ||
54 | |||
55 | /** Internal use only: arbitrary argument to read_memory_func. */ | ||
56 | void *read_memory_func_extra; | ||
57 | |||
58 | } BacktraceIterator; | ||
59 | |||
60 | |||
61 | /** Initializes a backtracer to start from the given location. | ||
62 | * | ||
63 | * If the frame pointer cannot be determined it is set to -1. | ||
64 | * | ||
65 | * @param state The state to be filled in. | ||
66 | * @param read_memory_func A callback that reads memory. If NULL, a default | ||
67 | * value is provided. | ||
68 | * @param read_memory_func_extra An arbitrary argument to read_memory_func. | ||
69 | * @param pc The current PC. | ||
70 | * @param lr The current value of the 'lr' register. | ||
71 | * @param sp The current value of the 'sp' register. | ||
72 | * @param r52 The current value of the 'r52' register. | ||
73 | */ | ||
74 | extern void backtrace_init(BacktraceIterator *state, | ||
75 | BacktraceMemoryReader read_memory_func, | ||
76 | void *read_memory_func_extra, | ||
77 | VirtualAddress pc, VirtualAddress lr, | ||
78 | VirtualAddress sp, VirtualAddress r52); | ||
79 | |||
80 | |||
81 | /** Advances the backtracing state to the calling frame, returning | ||
82 | * true iff successful. | ||
83 | */ | ||
84 | extern bool backtrace_next(BacktraceIterator *state); | ||
85 | |||
86 | |||
87 | typedef enum { | ||
88 | |||
89 | /* We have no idea what the caller's pc is. */ | ||
90 | PC_LOC_UNKNOWN, | ||
91 | |||
92 | /* The caller's pc is currently in lr. */ | ||
93 | PC_LOC_IN_LR, | ||
94 | |||
95 | /* The caller's pc can be found by dereferencing the caller's sp. */ | ||
96 | PC_LOC_ON_STACK | ||
97 | |||
98 | } CallerPCLocation; | ||
99 | |||
100 | |||
101 | typedef enum { | ||
102 | |||
103 | /* We have no idea what the caller's sp is. */ | ||
104 | SP_LOC_UNKNOWN, | ||
105 | |||
106 | /* The caller's sp is currently in r52. */ | ||
107 | SP_LOC_IN_R52, | ||
108 | |||
109 | /* The caller's sp can be found by adding a certain constant | ||
110 | * to the current value of sp. | ||
111 | */ | ||
112 | SP_LOC_OFFSET | ||
113 | |||
114 | } CallerSPLocation; | ||
115 | |||
116 | |||
117 | /* Bit values ORed into CALLER_* values for info ops. */ | ||
118 | enum { | ||
119 | /* Setting the low bit on any of these values means the info op | ||
120 | * applies only to one bundle ago. | ||
121 | */ | ||
122 | ONE_BUNDLE_AGO_FLAG = 1, | ||
123 | |||
124 | /* Setting this bit on a CALLER_SP_* value means the PC is in LR. | ||
125 | * If not set, PC is on the stack. | ||
126 | */ | ||
127 | PC_IN_LR_FLAG = 2, | ||
128 | |||
129 | /* This many of the low bits of a CALLER_SP_* value are for the | ||
130 | * flag bits above. | ||
131 | */ | ||
132 | NUM_INFO_OP_FLAGS = 2, | ||
133 | |||
134 | /* We cannot have an info op in the memory pipe, so two is the maximum. */ | ||
135 | MAX_INFO_OPS_PER_BUNDLE = 2 | ||
136 | }; | ||
137 | |||
138 | |||
139 | /** Internal constants used to define 'info' operands. */ | ||
140 | enum { | ||
141 | /* 0 and 1 are reserved, as are all negative numbers. */ | ||
142 | |||
143 | CALLER_UNKNOWN_BASE = 2, | ||
144 | |||
145 | CALLER_SP_IN_R52_BASE = 4, | ||
146 | |||
147 | CALLER_SP_OFFSET_BASE = 8 | ||
148 | }; | ||
149 | |||
150 | |||
151 | /** Current backtracer state describing where it thinks the caller is. */ | ||
152 | typedef struct { | ||
153 | /* | ||
154 | * Public fields | ||
155 | */ | ||
156 | |||
157 | /* How do we find the caller's PC? */ | ||
158 | CallerPCLocation pc_location : 8; | ||
159 | |||
160 | /* How do we find the caller's SP? */ | ||
161 | CallerSPLocation sp_location : 8; | ||
162 | |||
163 | /* If sp_location == SP_LOC_OFFSET, then caller_sp == sp + | ||
164 | * loc->sp_offset. Else this field is undefined. | ||
165 | */ | ||
166 | uint16_t sp_offset; | ||
167 | |||
168 | /* Is the most recently visited bundle a terminating bundle? */ | ||
169 | bool at_terminating_bundle; | ||
170 | |||
171 | /* | ||
172 | * Private fields | ||
173 | */ | ||
174 | |||
175 | /* Will the forward scanner see someone clobbering sp | ||
176 | * (i.e. changing it with something other than addi sp, sp, N)? | ||
177 | */ | ||
178 | bool sp_clobber_follows; | ||
179 | |||
180 | /* Operand to next "visible" info op (no more than one bundle past | ||
181 | * the next terminating bundle), or -32768 if none. | ||
182 | */ | ||
183 | int16_t next_info_operand; | ||
184 | |||
185 | /* Is the info op in next_info_operand in the very next bundle? */ | ||
186 | bool is_next_info_operand_adjacent; | ||
187 | |||
188 | } CallerLocation; | ||
189 | |||
190 | |||
191 | |||
192 | |||
193 | #endif /* _TILE_BACKTRACE_H */ | ||
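A sketch of the intended calling pattern for the iterator above: initialize from a set of captured registers, then step until backtrace_next() fails. dump_frames() is a hypothetical caller; passing NULL selects the default memory reader, per the comment on backtrace_init().

#include <linux/kernel.h>
#include <asm/backtrace.h>

static void dump_frames(VirtualAddress pc, VirtualAddress lr,
                        VirtualAddress sp, VirtualAddress r52)
{
        BacktraceIterator it;

        backtrace_init(&it, NULL, NULL, pc, lr, sp, r52);
        do {
                pr_info("  frame: pc %#llx sp %#llx fp %#llx\n",
                        (unsigned long long)it.pc,
                        (unsigned long long)it.sp,
                        (unsigned long long)it.fp);
        } while (backtrace_next(&it));
}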
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h new file mode 100644 index 000000000000..84600f3514da --- /dev/null +++ b/arch/tile/include/asm/bitops.h | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * Copyright 1992, Linus Torvalds. | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_BITOPS_H | ||
17 | #define _ASM_TILE_BITOPS_H | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | |||
21 | #ifndef _LINUX_BITOPS_H | ||
22 | #error only <linux/bitops.h> can be included directly | ||
23 | #endif | ||
24 | |||
25 | #ifdef __tilegx__ | ||
26 | #include <asm/bitops_64.h> | ||
27 | #else | ||
28 | #include <asm/bitops_32.h> | ||
29 | #endif | ||
30 | |||
31 | /** | ||
32 | * __ffs - find first set bit in word | ||
33 | * @word: The word to search | ||
34 | * | ||
35 | * Undefined if no set bit exists, so code should check against 0 first. | ||
36 | */ | ||
37 | static inline unsigned long __ffs(unsigned long word) | ||
38 | { | ||
39 | return __builtin_ctzl(word); | ||
40 | } | ||
41 | |||
42 | /** | ||
43 | * ffz - find first zero bit in word | ||
44 | * @word: The word to search | ||
45 | * | ||
46 | * Undefined if no zero exists, so code should check against ~0UL first. | ||
47 | */ | ||
48 | static inline unsigned long ffz(unsigned long word) | ||
49 | { | ||
50 | return __builtin_ctzl(~word); | ||
51 | } | ||
52 | |||
53 | /** | ||
54 | * __fls - find last set bit in word | ||
55 | * @word: The word to search | ||
56 | * | ||
57 | * Undefined if no set bit exists, so code should check against 0 first. | ||
58 | */ | ||
59 | static inline unsigned long __fls(unsigned long word) | ||
60 | { | ||
61 | return (sizeof(word) * 8) - 1 - __builtin_clzl(word); | ||
62 | } | ||
63 | |||
64 | /** | ||
65 | * ffs - find first set bit in word | ||
66 | * @x: the word to search | ||
67 | * | ||
68 | * This is defined the same way as the libc and compiler builtin ffs | ||
69 | * routines, therefore differs in spirit from the other bitops. | ||
70 | * | ||
71 | * ffs(value) returns 0 if value is 0 or the position of the first | ||
72 | * set bit if value is nonzero. The first (least significant) bit | ||
73 | * is at position 1. | ||
74 | */ | ||
75 | static inline int ffs(int x) | ||
76 | { | ||
77 | return __builtin_ffs(x); | ||
78 | } | ||
79 | |||
80 | /** | ||
81 | * fls - find last set bit in word | ||
82 | * @x: the word to search | ||
83 | * | ||
84 | * This is defined in a similar way as the libc and compiler builtin | ||
85 | * ffs, but returns the position of the most significant set bit. | ||
86 | * | ||
87 | * fls(value) returns 0 if value is 0 or the position of the last | ||
88 | * set bit if value is nonzero. The last (most significant) bit is | ||
89 | * at position 32. | ||
90 | */ | ||
91 | static inline int fls(int x) | ||
92 | { | ||
93 | return (sizeof(int) * 8) - __builtin_clz(x); | ||
94 | } | ||
95 | |||
96 | static inline int fls64(__u64 w) | ||
97 | { | ||
98 | return (sizeof(__u64) * 8) - __builtin_clzll(w); | ||
99 | } | ||
100 | |||
101 | static inline unsigned int hweight32(unsigned int w) | ||
102 | { | ||
103 | return __builtin_popcount(w); | ||
104 | } | ||
105 | |||
106 | static inline unsigned int hweight16(unsigned int w) | ||
107 | { | ||
108 | return __builtin_popcount(w & 0xffff); | ||
109 | } | ||
110 | |||
111 | static inline unsigned int hweight8(unsigned int w) | ||
112 | { | ||
113 | return __builtin_popcount(w & 0xff); | ||
114 | } | ||
115 | |||
116 | static inline unsigned long hweight64(__u64 w) | ||
117 | { | ||
118 | return __builtin_popcountll(w); | ||
119 | } | ||
120 | |||
121 | #include <asm-generic/bitops/lock.h> | ||
122 | #include <asm-generic/bitops/sched.h> | ||
123 | #include <asm-generic/bitops/ext2-non-atomic.h> | ||
124 | #include <asm-generic/bitops/minix.h> | ||
125 | |||
126 | #endif /* _ASM_TILE_BITOPS_H */ | ||
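To keep the differing conventions straight, a few worked values; the helper function is hypothetical, and the results follow directly from the definitions above.

#include <linux/bitops.h>

static inline void bit_search_examples(void)
{
        /* __ffs()/__fls()/ffz() are 0-based and undefined if no such bit. */
        unsigned long a = __ffs(0x50UL);        /* == 4, lowest set bit */
        unsigned long b = __fls(0x50UL);        /* == 6, highest set bit */
        unsigned long c = ffz(0x0fUL);          /* == 4, lowest clear bit */

        /* ffs()/fls() are 1-based and return 0 for a zero argument. */
        int d = ffs(0x50);                      /* == 5 */
        int e = fls(0x50);                      /* == 7 */

        (void)a; (void)b; (void)c; (void)d; (void)e;
}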
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h new file mode 100644 index 000000000000..7a93c001ac19 --- /dev/null +++ b/arch/tile/include/asm/bitops_32.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_BITOPS_32_H | ||
16 | #define _ASM_TILE_BITOPS_32_H | ||
17 | |||
18 | #include <linux/compiler.h> | ||
19 | #include <asm/atomic.h> | ||
20 | #include <asm/system.h> | ||
21 | |||
22 | /* Tile-specific routines to support <asm/bitops.h>. */ | ||
23 | unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask); | ||
24 | unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask); | ||
25 | unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask); | ||
26 | |||
27 | /** | ||
28 | * set_bit - Atomically set a bit in memory | ||
29 | * @nr: the bit to set | ||
30 | * @addr: the address to start counting from | ||
31 | * | ||
32 | * This function is atomic and may not be reordered. | ||
33 | * See __set_bit() if you do not require the atomic guarantees. | ||
34 | * Note that @nr may be almost arbitrarily large; this function is not | ||
35 | * restricted to acting on a single-word quantity. | ||
36 | */ | ||
37 | static inline void set_bit(unsigned nr, volatile unsigned long *addr) | ||
38 | { | ||
39 | _atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr)); | ||
40 | } | ||
41 | |||
42 | /** | ||
43 | * clear_bit - Clears a bit in memory | ||
44 | * @nr: Bit to clear | ||
45 | * @addr: Address to start counting from | ||
46 | * | ||
47 | * clear_bit() is atomic and may not be reordered. | ||
48 | * See __clear_bit() if you do not require the atomic guarantees. | ||
49 | * Note that @nr may be almost arbitrarily large; this function is not | ||
50 | * restricted to acting on a single-word quantity. | ||
51 | * | ||
52 | * clear_bit() may not contain a memory barrier, so if it is used for | ||
53 | * locking purposes, you should call smp_mb__before_clear_bit() and/or | ||
54 | * smp_mb__after_clear_bit() to ensure changes are visible on other cpus. | ||
55 | */ | ||
56 | static inline void clear_bit(unsigned nr, volatile unsigned long *addr) | ||
57 | { | ||
58 | _atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr)); | ||
59 | } | ||
60 | |||
61 | /** | ||
62 | * change_bit - Toggle a bit in memory | ||
63 | * @nr: Bit to change | ||
64 | * @addr: Address to start counting from | ||
65 | * | ||
66 | * change_bit() is atomic and may not be reordered. | ||
67 | * See __change_bit() if you do not require the atomic guarantees. | ||
68 | * Note that @nr may be almost arbitrarily large; this function is not | ||
69 | * restricted to acting on a single-word quantity. | ||
70 | */ | ||
71 | static inline void change_bit(unsigned nr, volatile unsigned long *addr) | ||
72 | { | ||
73 | _atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr)); | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * test_and_set_bit - Set a bit and return its old value | ||
78 | * @nr: Bit to set | ||
79 | * @addr: Address to count from | ||
80 | * | ||
81 | * This operation is atomic and cannot be reordered. | ||
82 | * It also implies a memory barrier. | ||
83 | */ | ||
84 | static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr) | ||
85 | { | ||
86 | unsigned long mask = BIT_MASK(nr); | ||
87 | addr += BIT_WORD(nr); | ||
88 | smp_mb(); /* barrier for proper semantics */ | ||
89 | return (_atomic_or(addr, mask) & mask) != 0; | ||
90 | } | ||
91 | |||
92 | /** | ||
93 | * test_and_clear_bit - Clear a bit and return its old value | ||
94 | * @nr: Bit to clear | ||
95 | * @addr: Address to count from | ||
96 | * | ||
97 | * This operation is atomic and cannot be reordered. | ||
98 | * It also implies a memory barrier. | ||
99 | */ | ||
100 | static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr) | ||
101 | { | ||
102 | unsigned long mask = BIT_MASK(nr); | ||
103 | addr += BIT_WORD(nr); | ||
104 | smp_mb(); /* barrier for proper semantics */ | ||
105 | return (_atomic_andn(addr, mask) & mask) != 0; | ||
106 | } | ||
107 | |||
108 | /** | ||
109 | * test_and_change_bit - Change a bit and return its old value | ||
110 | * @nr: Bit to change | ||
111 | * @addr: Address to count from | ||
112 | * | ||
113 | * This operation is atomic and cannot be reordered. | ||
114 | * It also implies a memory barrier. | ||
115 | */ | ||
116 | static inline int test_and_change_bit(unsigned nr, | ||
117 | volatile unsigned long *addr) | ||
118 | { | ||
119 | unsigned long mask = BIT_MASK(nr); | ||
120 | addr += BIT_WORD(nr); | ||
121 | smp_mb(); /* barrier for proper semantics */ | ||
122 | return (_atomic_xor(addr, mask) & mask) != 0; | ||
123 | } | ||
124 | |||
125 | /* See discussion at smp_mb__before_atomic_dec() in <asm/atomic.h>. */ | ||
126 | #define smp_mb__before_clear_bit() smp_mb() | ||
127 | #define smp_mb__after_clear_bit() do {} while (0) | ||
128 | |||
129 | #include <asm-generic/bitops/non-atomic.h> | ||
130 | #include <asm-generic/bitops/ext2-atomic.h> | ||
131 | |||
132 | #endif /* _ASM_TILE_BITOPS_32_H */ | ||
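A hedged usage sketch of the atomic bit operations and the barrier pairing described above; channel_busy, claim_channel() and release_channel() are hypothetical.

#include <linux/errno.h>
#include <linux/bitops.h>

static unsigned long channel_busy[BITS_TO_LONGS(64)];

/* Claim a channel; returns 0 on success, -EBUSY if it was already taken. */
static inline int claim_channel(unsigned nr)
{
        if (test_and_set_bit(nr, channel_busy))
                return -EBUSY;
        return 0;
}

/* Release a channel; the barrier orders prior stores before the release. */
static inline void release_channel(unsigned nr)
{
        smp_mb__before_clear_bit();
        clear_bit(nr, channel_busy);
}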
diff --git a/arch/tile/include/asm/bitsperlong.h b/arch/tile/include/asm/bitsperlong.h new file mode 100644 index 000000000000..58c771f2af2f --- /dev/null +++ b/arch/tile/include/asm/bitsperlong.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_BITSPERLONG_H | ||
16 | #define _ASM_TILE_BITSPERLONG_H | ||
17 | |||
18 | #ifdef __LP64__ | ||
19 | # define __BITS_PER_LONG 64 | ||
20 | #else | ||
21 | # define __BITS_PER_LONG 32 | ||
22 | #endif | ||
23 | |||
24 | #include <asm-generic/bitsperlong.h> | ||
25 | |||
26 | #endif /* _ASM_TILE_BITSPERLONG_H */ | ||
diff --git a/arch/tile/include/asm/bug.h b/arch/tile/include/asm/bug.h new file mode 100644 index 000000000000..b12fd89e42e9 --- /dev/null +++ b/arch/tile/include/asm/bug.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/bug.h> | |||
diff --git a/arch/tile/include/asm/bugs.h b/arch/tile/include/asm/bugs.h new file mode 100644 index 000000000000..61791e1ad9f5 --- /dev/null +++ b/arch/tile/include/asm/bugs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/bugs.h> | |||
diff --git a/arch/tile/include/asm/byteorder.h b/arch/tile/include/asm/byteorder.h new file mode 100644 index 000000000000..9558416d578b --- /dev/null +++ b/arch/tile/include/asm/byteorder.h | |||
@@ -0,0 +1 @@ | |||
#include <linux/byteorder/little_endian.h> | |||
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h new file mode 100644 index 000000000000..c2b7dcfe5327 --- /dev/null +++ b/arch/tile/include/asm/cache.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CACHE_H | ||
16 | #define _ASM_TILE_CACHE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | /* bytes per L1 data cache line */ | ||
21 | #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE() | ||
22 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) | ||
23 | #define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1)) & -L1_CACHE_BYTES) | ||
24 | |||
25 | /* bytes per L1 instruction cache line */ | ||
26 | #define L1I_CACHE_SHIFT CHIP_L1I_LOG_LINE_SIZE() | ||
27 | #define L1I_CACHE_BYTES (1 << L1I_CACHE_SHIFT) | ||
28 | #define L1I_CACHE_ALIGN(x) (((x)+(L1I_CACHE_BYTES-1)) & -L1I_CACHE_BYTES) | ||
29 | |||
30 | /* bytes per L2 cache line */ | ||
31 | #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE() | ||
32 | #define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT) | ||
33 | #define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES) | ||
34 | |||
35 | /* use the cache line size for the L2, which is where it counts */ | ||
36 | #define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT | ||
37 | #define SMP_CACHE_BYTES L2_CACHE_BYTES | ||
38 | #define INTERNODE_CACHE_SHIFT L2_CACHE_SHIFT | ||
39 | #define INTERNODE_CACHE_BYTES L2_CACHE_BYTES | ||
40 | |||
41 | /* Group together read-mostly things to avoid cache false sharing */ | ||
42 | #define __read_mostly __attribute__((__section__(".data.read_mostly"))) | ||
43 | |||
44 | /* | ||
45 | * Attribute for data that is kept read/write coherent until the end of | ||
46 | * initialization, then bumped to read/only incoherent for performance. | ||
47 | */ | ||
48 | #define __write_once __attribute__((__section__(".w1data"))) | ||
49 | |||
50 | #endif /* _ASM_TILE_CACHE_H */ | ||
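Two small, purely illustrative uses of the macros above (the names and sizes are hypothetical): rounding a buffer size up to a full L2 line so it never shares a line with unrelated data, and marking a hot, rarely-written table so it is grouped away from frequently-written data.

#include <asm/cache.h>

/* Round the buffer up to a whole number of L2 cache lines. */
#define EXAMPLE_DMA_BUF_BYTES   L2_CACHE_ALIGN(1000)

/* Read often, written only at boot; grouped to avoid false sharing. */
static int example_lookup_table[16] __read_mostly;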
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h new file mode 100644 index 000000000000..7e2096a4ef7d --- /dev/null +++ b/arch/tile/include/asm/cacheflush.h | |||
@@ -0,0 +1,145 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CACHEFLUSH_H | ||
16 | #define _ASM_TILE_CACHEFLUSH_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | /* Keep includes the same across arches. */ | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/cache.h> | ||
23 | #include <asm/system.h> | ||
24 | |||
25 | /* Caches are physically-indexed and so don't need special treatment */ | ||
26 | #define flush_cache_all() do { } while (0) | ||
27 | #define flush_cache_mm(mm) do { } while (0) | ||
28 | #define flush_cache_dup_mm(mm) do { } while (0) | ||
29 | #define flush_cache_range(vma, start, end) do { } while (0) | ||
30 | #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) | ||
31 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 | ||
32 | #define flush_dcache_page(page) do { } while (0) | ||
33 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | ||
34 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | ||
35 | #define flush_cache_vmap(start, end) do { } while (0) | ||
36 | #define flush_cache_vunmap(start, end) do { } while (0) | ||
37 | #define flush_icache_page(vma, pg) do { } while (0) | ||
38 | #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) | ||
39 | |||
40 | /* See "arch/tile/lib/__invalidate_icache.S". */ | ||
41 | extern void __invalidate_icache(unsigned long start, unsigned long size); | ||
42 | |||
43 | /* Flush the icache just on this cpu */ | ||
44 | static inline void __flush_icache_range(unsigned long start, unsigned long end) | ||
45 | { | ||
46 | __invalidate_icache(start, end - start); | ||
47 | } | ||
48 | |||
49 | /* Flush the entire icache on this cpu. */ | ||
50 | #define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE()) | ||
51 | |||
52 | #ifdef CONFIG_SMP | ||
53 | /* | ||
54 | * When the kernel writes to its own text we need to do an SMP | ||
55 | * broadcast to make the L1I coherent everywhere. This includes | ||
56 | * module load and single step. | ||
57 | */ | ||
58 | extern void flush_icache_range(unsigned long start, unsigned long end); | ||
59 | #else | ||
60 | #define flush_icache_range __flush_icache_range | ||
61 | #endif | ||
62 | |||
63 | /* | ||
64 | * An update to an executable user page requires icache flushing. | ||
65 | * We could carefully update only tiles that are running this process, | ||
66 | * and rely on the fact that we flush the icache on every context | ||
67 | * switch to avoid doing extra work here. But for now, I'll be | ||
68 | * conservative and just do a global icache flush. | ||
69 | */ | ||
70 | static inline void copy_to_user_page(struct vm_area_struct *vma, | ||
71 | struct page *page, unsigned long vaddr, | ||
72 | void *dst, void *src, int len) | ||
73 | { | ||
74 | memcpy(dst, src, len); | ||
75 | if (vma->vm_flags & VM_EXEC) { | ||
76 | flush_icache_range((unsigned long) dst, | ||
77 | (unsigned long) dst + len); | ||
78 | } | ||
79 | } | ||
80 | |||
81 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | ||
82 | memcpy((dst), (src), (len)) | ||
83 | |||
84 | /* | ||
85 | * Invalidate a VA range; pads to L2 cacheline boundaries. | ||
86 | * | ||
87 | * Note that on TILE64, __inv_buffer() actually flushes modified | ||
88 | * cache lines in addition to invalidating them, i.e., it's the | ||
89 | * same as __finv_buffer(). | ||
90 | */ | ||
91 | static inline void __inv_buffer(void *buffer, size_t size) | ||
92 | { | ||
93 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
94 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
95 | while (next < finish) { | ||
96 | __insn_inv(next); | ||
97 | next += CHIP_INV_STRIDE(); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /* Flush a VA range; pads to L2 cacheline boundaries. */ | ||
102 | static inline void __flush_buffer(void *buffer, size_t size) | ||
103 | { | ||
104 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
105 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
106 | while (next < finish) { | ||
107 | __insn_flush(next); | ||
108 | next += CHIP_FLUSH_STRIDE(); | ||
109 | } | ||
110 | } | ||
111 | |||
112 | /* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */ | ||
113 | static inline void __finv_buffer(void *buffer, size_t size) | ||
114 | { | ||
115 | char *next = (char *)((long)buffer & -L2_CACHE_BYTES); | ||
116 | char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size); | ||
117 | while (next < finish) { | ||
118 | __insn_finv(next); | ||
119 | next += CHIP_FINV_STRIDE(); | ||
120 | } | ||
121 | } | ||
122 | |||
123 | |||
124 | /* Invalidate a VA range, then memory fence. */ | ||
125 | static inline void inv_buffer(void *buffer, size_t size) | ||
126 | { | ||
127 | __inv_buffer(buffer, size); | ||
128 | mb_incoherent(); | ||
129 | } | ||
130 | |||
131 | /* Flush a VA range, then memory fence. */ | ||
132 | static inline void flush_buffer(void *buffer, size_t size) | ||
133 | { | ||
134 | __flush_buffer(buffer, size); | ||
135 | mb_incoherent(); | ||
136 | } | ||
137 | |||
138 | /* Flush & invalidate a VA range, then memory fence. */ | ||
139 | static inline void finv_buffer(void *buffer, size_t size) | ||
140 | { | ||
141 | __finv_buffer(buffer, size); | ||
142 | mb_incoherent(); | ||
143 | } | ||
144 | |||
145 | #endif /* _ASM_TILE_CACHEFLUSH_H */ | ||
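A usage sketch for the buffer helpers above (illustrative only, not from the patch; the external "agent" is a placeholder for any non-coherent reader or writer of memory):

/* Hypothetical sketch: push a request buffer out to memory before a
 * non-coherent agent reads it, then drop any stale cached copies of
 * the response buffer before the CPU examines what the agent wrote.
 * Both helpers pad to L2 line boundaries and finish with
 * mb_incoherent().
 */
static void example_exchange(void *req, size_t req_len,
			     void *resp, size_t resp_len)
{
	flush_buffer(req, req_len);   /* write back dirty lines */
	/* ... start the external agent here ... */
	inv_buffer(resp, resp_len);   /* invalidate before reading */
}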
diff --git a/arch/tile/include/asm/checksum.h b/arch/tile/include/asm/checksum.h new file mode 100644 index 000000000000..a120766c7264 --- /dev/null +++ b/arch/tile/include/asm/checksum.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CHECKSUM_H | ||
16 | #define _ASM_TILE_CHECKSUM_H | ||
17 | |||
18 | #include <asm-generic/checksum.h> | ||
19 | |||
20 | /* Allow us to provide a more optimized do_csum(). */ | ||
21 | __wsum do_csum(const unsigned char *buff, int len); | ||
22 | #define do_csum do_csum | ||
23 | |||
24 | #endif /* _ASM_TILE_CHECKSUM_H */ | ||
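For context (a sketch, not from the patch): defining do_csum here lets the generic checksum code in lib/checksum.c skip its own static do_csum(), so csum_partial() ends up using the arch-optimized inner loop while callers keep the generic interface:

/* Hypothetical sketch: callers still go through csum_partial() from
 * <asm-generic/checksum.h>; only the inner do_csum() is swapped out.
 */
static __wsum example_sum(const void *buf, int len)
{
	return csum_partial(buf, len, 0);
}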
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h new file mode 100644 index 000000000000..e133c53f6c4f --- /dev/null +++ b/arch/tile/include/asm/compat.h | |||
@@ -0,0 +1,308 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_COMPAT_H | ||
16 | #define _ASM_TILE_COMPAT_H | ||
17 | |||
18 | /* | ||
19 | * Architecture specific compatibility types | ||
20 | */ | ||
21 | #include <linux/types.h> | ||
22 | #include <linux/sched.h> | ||
23 | |||
24 | #define COMPAT_USER_HZ 100 | ||
25 | |||
26 | /* "long" and pointer-based types are different. */ | ||
27 | typedef s32 compat_long_t; | ||
28 | typedef u32 compat_ulong_t; | ||
29 | typedef u32 compat_size_t; | ||
30 | typedef s32 compat_ssize_t; | ||
31 | typedef s32 compat_off_t; | ||
32 | typedef s32 compat_time_t; | ||
33 | typedef s32 compat_clock_t; | ||
34 | typedef u32 compat_ino_t; | ||
35 | typedef u32 compat_caddr_t; | ||
36 | typedef u32 compat_uptr_t; | ||
37 | |||
38 | /* Many types are "int" or otherwise the same. */ | ||
39 | typedef __kernel_pid_t compat_pid_t; | ||
40 | typedef __kernel_uid_t __compat_uid_t; | ||
41 | typedef __kernel_gid_t __compat_gid_t; | ||
43 | typedef __kernel_gid32_t __compat_gid32_t; | ||
43 | typedef __kernel_uid32_t __compat_gid32_t; | ||
44 | typedef __kernel_mode_t compat_mode_t; | ||
45 | typedef __kernel_dev_t compat_dev_t; | ||
46 | typedef __kernel_loff_t compat_loff_t; | ||
47 | typedef __kernel_nlink_t compat_nlink_t; | ||
48 | typedef __kernel_ipc_pid_t compat_ipc_pid_t; | ||
49 | typedef __kernel_daddr_t compat_daddr_t; | ||
50 | typedef __kernel_fsid_t compat_fsid_t; | ||
51 | typedef __kernel_timer_t compat_timer_t; | ||
52 | typedef __kernel_key_t compat_key_t; | ||
53 | typedef int compat_int_t; | ||
54 | typedef s64 compat_s64; | ||
55 | typedef uint compat_uint_t; | ||
56 | typedef u64 compat_u64; | ||
57 | |||
58 | /* We use the same register dump format in 32-bit images. */ | ||
59 | typedef unsigned long compat_elf_greg_t; | ||
60 | #define COMPAT_ELF_NGREG (sizeof(struct pt_regs) / sizeof(compat_elf_greg_t)) | ||
61 | typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG]; | ||
62 | |||
63 | struct compat_timespec { | ||
64 | compat_time_t tv_sec; | ||
65 | s32 tv_nsec; | ||
66 | }; | ||
67 | |||
68 | struct compat_timeval { | ||
69 | compat_time_t tv_sec; | ||
70 | s32 tv_usec; | ||
71 | }; | ||
72 | |||
73 | struct compat_stat { | ||
74 | unsigned int st_dev; | ||
75 | unsigned int st_ino; | ||
76 | unsigned int st_mode; | ||
77 | unsigned int st_nlink; | ||
78 | unsigned int st_uid; | ||
79 | unsigned int st_gid; | ||
80 | unsigned int st_rdev; | ||
81 | unsigned int __pad1; | ||
82 | int st_size; | ||
83 | int st_blksize; | ||
84 | int __pad2; | ||
85 | int st_blocks; | ||
86 | int st_atime; | ||
87 | unsigned int st_atime_nsec; | ||
88 | int st_mtime; | ||
89 | unsigned int st_mtime_nsec; | ||
90 | int st_ctime; | ||
91 | unsigned int st_ctime_nsec; | ||
92 | unsigned int __unused[2]; | ||
93 | }; | ||
94 | |||
95 | struct compat_stat64 { | ||
96 | unsigned long st_dev; | ||
97 | unsigned long st_ino; | ||
98 | unsigned int st_mode; | ||
99 | unsigned int st_nlink; | ||
100 | unsigned int st_uid; | ||
101 | unsigned int st_gid; | ||
102 | unsigned long st_rdev; | ||
103 | long st_size; | ||
104 | unsigned int st_blksize; | ||
105 | unsigned long st_blocks __attribute__((packed)); | ||
106 | unsigned int st_atime; | ||
107 | unsigned int st_atime_nsec; | ||
108 | unsigned int st_mtime; | ||
109 | unsigned int st_mtime_nsec; | ||
110 | unsigned int st_ctime; | ||
111 | unsigned int st_ctime_nsec; | ||
112 | unsigned int __unused8; | ||
113 | }; | ||
114 | |||
115 | #define compat_statfs statfs | ||
116 | |||
117 | struct compat_sysctl { | ||
118 | unsigned int name; | ||
119 | int nlen; | ||
120 | unsigned int oldval; | ||
121 | unsigned int oldlenp; | ||
122 | unsigned int newval; | ||
123 | unsigned int newlen; | ||
124 | unsigned int __unused[4]; | ||
125 | }; | ||
126 | |||
127 | |||
128 | struct compat_flock { | ||
129 | short l_type; | ||
130 | short l_whence; | ||
131 | compat_off_t l_start; | ||
132 | compat_off_t l_len; | ||
133 | compat_pid_t l_pid; | ||
134 | }; | ||
135 | |||
136 | #define F_GETLK64 12 /* using 'struct flock64' */ | ||
137 | #define F_SETLK64 13 | ||
138 | #define F_SETLKW64 14 | ||
139 | |||
140 | struct compat_flock64 { | ||
141 | short l_type; | ||
142 | short l_whence; | ||
143 | compat_loff_t l_start; | ||
144 | compat_loff_t l_len; | ||
145 | compat_pid_t l_pid; | ||
146 | }; | ||
147 | |||
148 | #define COMPAT_RLIM_INFINITY 0xffffffff | ||
149 | |||
150 | #define _COMPAT_NSIG 64 | ||
151 | #define _COMPAT_NSIG_BPW 32 | ||
152 | |||
153 | typedef u32 compat_sigset_word; | ||
154 | |||
155 | #define COMPAT_OFF_T_MAX 0x7fffffff | ||
156 | #define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL | ||
157 | |||
158 | struct compat_ipc64_perm { | ||
159 | compat_key_t key; | ||
160 | __compat_uid32_t uid; | ||
161 | __compat_gid32_t gid; | ||
162 | __compat_uid32_t cuid; | ||
163 | __compat_gid32_t cgid; | ||
164 | unsigned short mode; | ||
165 | unsigned short __pad1; | ||
166 | unsigned short seq; | ||
167 | unsigned short __pad2; | ||
168 | compat_ulong_t unused1; | ||
169 | compat_ulong_t unused2; | ||
170 | }; | ||
171 | |||
172 | struct compat_semid64_ds { | ||
173 | struct compat_ipc64_perm sem_perm; | ||
174 | compat_time_t sem_otime; | ||
175 | compat_ulong_t __unused1; | ||
176 | compat_time_t sem_ctime; | ||
177 | compat_ulong_t __unused2; | ||
178 | compat_ulong_t sem_nsems; | ||
179 | compat_ulong_t __unused3; | ||
180 | compat_ulong_t __unused4; | ||
181 | }; | ||
182 | |||
183 | struct compat_msqid64_ds { | ||
184 | struct compat_ipc64_perm msg_perm; | ||
185 | compat_time_t msg_stime; | ||
186 | compat_ulong_t __unused1; | ||
187 | compat_time_t msg_rtime; | ||
188 | compat_ulong_t __unused2; | ||
189 | compat_time_t msg_ctime; | ||
190 | compat_ulong_t __unused3; | ||
191 | compat_ulong_t msg_cbytes; | ||
192 | compat_ulong_t msg_qnum; | ||
193 | compat_ulong_t msg_qbytes; | ||
194 | compat_pid_t msg_lspid; | ||
195 | compat_pid_t msg_lrpid; | ||
196 | compat_ulong_t __unused4; | ||
197 | compat_ulong_t __unused5; | ||
198 | }; | ||
199 | |||
200 | struct compat_shmid64_ds { | ||
201 | struct compat_ipc64_perm shm_perm; | ||
202 | compat_size_t shm_segsz; | ||
203 | compat_time_t shm_atime; | ||
204 | compat_ulong_t __unused1; | ||
205 | compat_time_t shm_dtime; | ||
206 | compat_ulong_t __unused2; | ||
207 | compat_time_t shm_ctime; | ||
208 | compat_ulong_t __unused3; | ||
209 | compat_pid_t shm_cpid; | ||
210 | compat_pid_t shm_lpid; | ||
211 | compat_ulong_t shm_nattch; | ||
212 | compat_ulong_t __unused4; | ||
213 | compat_ulong_t __unused5; | ||
214 | }; | ||
215 | |||
216 | /* | ||
217 | * A pointer passed in from user mode. This should not | ||
218 | * be used for syscall parameters; just declare them | ||
219 | * as pointers, because the syscall entry code will have | ||
220 | * already converted them appropriately. | ||
221 | */ | ||
222 | |||
223 | static inline void __user *compat_ptr(compat_uptr_t uptr) | ||
224 | { | ||
225 | return (void __user *)(unsigned long)uptr; | ||
226 | } | ||
227 | |||
228 | static inline compat_uptr_t ptr_to_compat(void __user *uptr) | ||
229 | { | ||
230 | return (u32)(unsigned long)uptr; | ||
231 | } | ||
232 | |||
233 | /* Sign-extend when storing a kernel pointer to a user's ptregs. */ | ||
234 | static inline unsigned long ptr_to_compat_reg(void __user *uptr) | ||
235 | { | ||
236 | return (long)(int)(long)uptr; | ||
237 | } | ||
238 | |||
239 | static inline void __user *compat_alloc_user_space(long len) | ||
240 | { | ||
241 | struct pt_regs *regs = task_pt_regs(current); | ||
242 | return (void __user *)regs->sp - len; | ||
243 | } | ||
244 | |||
245 | static inline int is_compat_task(void) | ||
246 | { | ||
247 | return current_thread_info()->status & TS_COMPAT; | ||
248 | } | ||
249 | |||
250 | extern int compat_setup_rt_frame(int sig, struct k_sigaction *ka, | ||
251 | siginfo_t *info, sigset_t *set, | ||
252 | struct pt_regs *regs); | ||
253 | |||
254 | /* Compat syscalls. */ | ||
255 | struct compat_sigaction; | ||
256 | struct compat_siginfo; | ||
257 | struct compat_sigaltstack; | ||
258 | long compat_sys_execve(char __user *path, compat_uptr_t __user *argv, | ||
259 | compat_uptr_t __user *envp); | ||
260 | long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, | ||
261 | struct compat_sigaction __user *oact, | ||
262 | size_t sigsetsize); | ||
263 | long compat_sys_rt_sigqueueinfo(int pid, int sig, | ||
264 | struct compat_siginfo __user *uinfo); | ||
265 | long compat_sys_rt_sigreturn(void); | ||
266 | long compat_sys_sigaltstack(const struct compat_sigaltstack __user *uss_ptr, | ||
267 | struct compat_sigaltstack __user *uoss_ptr); | ||
268 | long compat_sys_truncate64(char __user *filename, u32 dummy, u32 low, u32 high); | ||
269 | long compat_sys_ftruncate64(unsigned int fd, u32 dummy, u32 low, u32 high); | ||
270 | long compat_sys_pread64(unsigned int fd, char __user *ubuf, size_t count, | ||
271 | u32 dummy, u32 low, u32 high); | ||
272 | long compat_sys_pwrite64(unsigned int fd, char __user *ubuf, size_t count, | ||
273 | u32 dummy, u32 low, u32 high); | ||
274 | long compat_sys_lookup_dcookie(u32 low, u32 high, char __user *buf, size_t len); | ||
275 | long compat_sys_sync_file_range2(int fd, unsigned int flags, | ||
276 | u32 offset_lo, u32 offset_hi, | ||
277 | u32 nbytes_lo, u32 nbytes_hi); | ||
278 | long compat_sys_fallocate(int fd, int mode, | ||
279 | u32 offset_lo, u32 offset_hi, | ||
280 | u32 len_lo, u32 len_hi); | ||
281 | long compat_sys_stat64(char __user *filename, | ||
282 | struct compat_stat64 __user *statbuf); | ||
283 | long compat_sys_lstat64(char __user *filename, | ||
284 | struct compat_stat64 __user *statbuf); | ||
285 | long compat_sys_fstat64(unsigned int fd, struct compat_stat64 __user *statbuf); | ||
286 | long compat_sys_fstatat64(int dfd, char __user *filename, | ||
287 | struct compat_stat64 __user *statbuf, int flag); | ||
288 | long compat_sys_sched_rr_get_interval(compat_pid_t pid, | ||
289 | struct compat_timespec __user *interval); | ||
290 | ssize_t compat_sys_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, | ||
291 | size_t count); | ||
292 | |||
293 | /* Versions of compat functions that differ from generic Linux. */ | ||
294 | struct compat_msgbuf; | ||
295 | long tile_compat_sys_msgsnd(int msqid, | ||
296 | struct compat_msgbuf __user *msgp, | ||
297 | size_t msgsz, int msgflg); | ||
298 | long tile_compat_sys_msgrcv(int msqid, | ||
299 | struct compat_msgbuf __user *msgp, | ||
300 | size_t msgsz, long msgtyp, int msgflg); | ||
301 | long tile_compat_sys_ptrace(compat_long_t request, compat_long_t pid, | ||
302 | compat_long_t addr, compat_long_t data); | ||
303 | |||
304 | /* Tilera Linux syscalls that don't have "compat" versions. */ | ||
305 | #define compat_sys_raise_fpe sys_raise_fpe | ||
306 | #define compat_sys_flush_cache sys_flush_cache | ||
307 | |||
308 | #endif /* _ASM_TILE_COMPAT_H */ | ||
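An illustrative sketch of the pointer-conversion helpers above (not from the patch; the function name is made up and the usual <linux/uaccess.h> machinery is assumed):

/* Hypothetical sketch: a 32-bit user pointer arrives as a
 * compat_uptr_t, is widened with compat_ptr() before use, and
 * ptr_to_compat() goes the other way when a pointer is written back
 * out to a 32-bit process.
 */
static long example_fetch_int(compat_uptr_t uptr, int *out)
{
	int __user *p = compat_ptr(uptr);  /* zero-extend to a usable pointer */

	return copy_from_user(out, p, sizeof(*out)) ? -EFAULT : 0;
}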
diff --git a/arch/tile/include/asm/cputime.h b/arch/tile/include/asm/cputime.h new file mode 100644 index 000000000000..6d68ad7e0ea3 --- /dev/null +++ b/arch/tile/include/asm/cputime.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/cputime.h> | |||
diff --git a/arch/tile/include/asm/current.h b/arch/tile/include/asm/current.h new file mode 100644 index 000000000000..da21acf020d3 --- /dev/null +++ b/arch/tile/include/asm/current.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_CURRENT_H | ||
16 | #define _ASM_TILE_CURRENT_H | ||
17 | |||
18 | #include <linux/thread_info.h> | ||
19 | |||
20 | struct task_struct; | ||
21 | |||
22 | static inline struct task_struct *get_current(void) | ||
23 | { | ||
24 | return current_thread_info()->task; | ||
25 | } | ||
26 | #define current get_current() | ||
27 | |||
28 | /* Return a usable "task_struct" pointer even if the real one is corrupt. */ | ||
29 | struct task_struct *validate_current(void); | ||
30 | |||
31 | #endif /* _ASM_TILE_CURRENT_H */ | ||
diff --git a/arch/tile/include/asm/delay.h b/arch/tile/include/asm/delay.h new file mode 100644 index 000000000000..97b0e69e704e --- /dev/null +++ b/arch/tile/include/asm/delay.h | |||
@@ -0,0 +1,34 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DELAY_H | ||
16 | #define _ASM_TILE_DELAY_H | ||
17 | |||
18 | /* Undefined functions to get compile-time errors. */ | ||
19 | extern void __bad_udelay(void); | ||
20 | extern void __bad_ndelay(void); | ||
21 | |||
22 | extern void __udelay(unsigned long usecs); | ||
23 | extern void __ndelay(unsigned long nsecs); | ||
24 | extern void __delay(unsigned long loops); | ||
25 | |||
26 | #define udelay(n) (__builtin_constant_p(n) ? \ | ||
27 | ((n) > 20000 ? __bad_udelay() : __ndelay((n) * 1000)) : \ | ||
28 | __udelay(n)) | ||
29 | |||
30 | #define ndelay(n) (__builtin_constant_p(n) ? \ | ||
31 | ((n) > 20000 ? __bad_ndelay() : __ndelay(n)) : \ | ||
32 | __ndelay(n)) | ||
33 | |||
34 | #endif /* _ASM_TILE_DELAY_H */ | ||
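To illustrate the constant folding above (a sketch, not from the patch):

/* Hypothetical sketch: a constant udelay() argument becomes an
 * __ndelay() of the equivalent nanosecond count at compile time, a
 * variable argument goes through __udelay(), and a constant above
 * 20000 leaves a call to the undefined __bad_udelay() so the build
 * fails at link time.
 */
static void example_delays(unsigned long runtime_us)
{
	udelay(10);          /* folds to __ndelay(10 * 1000) */
	udelay(runtime_us);  /* calls __udelay(runtime_us)   */
}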
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h new file mode 100644 index 000000000000..f0a4c256403b --- /dev/null +++ b/arch/tile/include/asm/device.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/device.h> | |||
diff --git a/arch/tile/include/asm/div64.h b/arch/tile/include/asm/div64.h new file mode 100644 index 000000000000..6cd978cefb28 --- /dev/null +++ b/arch/tile/include/asm/div64.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/div64.h> | |||
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h new file mode 100644 index 000000000000..cf466b39aa13 --- /dev/null +++ b/arch/tile/include/asm/dma-mapping.h | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DMA_MAPPING_H | ||
16 | #define _ASM_TILE_DMA_MAPPING_H | ||
17 | |||
18 | #include <linux/mm.h> | ||
19 | #include <linux/scatterlist.h> | ||
20 | #include <linux/cache.h> | ||
21 | #include <linux/io.h> | ||
22 | |||
23 | /* | ||
24 | * Note that on x86 and powerpc, there is a "struct dma_mapping_ops" | ||
25 | * that is used for all the DMA operations. For now, we don't have an | ||
26 | * equivalent on tile, because we only have a single way of doing DMA. | ||
27 | * (Tilera bug 7994 to use dma_mapping_ops.) | ||
28 | */ | ||
29 | |||
30 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | ||
31 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | ||
32 | |||
33 | extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size, | ||
34 | enum dma_data_direction); | ||
35 | extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, | ||
36 | size_t size, enum dma_data_direction); | ||
37 | extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
38 | enum dma_data_direction); | ||
39 | extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
40 | int nhwentries, enum dma_data_direction); | ||
41 | extern dma_addr_t dma_map_page(struct device *dev, struct page *page, | ||
42 | unsigned long offset, size_t size, | ||
43 | enum dma_data_direction); | ||
44 | extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address, | ||
45 | size_t size, enum dma_data_direction); | ||
46 | extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
47 | int nelems, enum dma_data_direction); | ||
48 | extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
49 | int nelems, enum dma_data_direction); | ||
50 | |||
51 | |||
52 | void *dma_alloc_coherent(struct device *dev, size_t size, | ||
53 | dma_addr_t *dma_handle, gfp_t flag); | ||
54 | |||
55 | void dma_free_coherent(struct device *dev, size_t size, | ||
56 | void *vaddr, dma_addr_t dma_handle); | ||
57 | |||
58 | extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t, | ||
59 | enum dma_data_direction); | ||
60 | extern void dma_sync_single_for_device(struct device *, dma_addr_t, | ||
61 | size_t, enum dma_data_direction); | ||
62 | extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, | ||
63 | unsigned long offset, size_t, | ||
64 | enum dma_data_direction); | ||
65 | extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, | ||
66 | unsigned long offset, size_t, | ||
67 | enum dma_data_direction); | ||
68 | extern void dma_cache_sync(void *vaddr, size_t, enum dma_data_direction); | ||
69 | |||
70 | static inline int | ||
71 | dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | ||
72 | { | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static inline int | ||
77 | dma_supported(struct device *dev, u64 mask) | ||
78 | { | ||
79 | return 1; | ||
80 | } | ||
81 | |||
82 | static inline int | ||
83 | dma_set_mask(struct device *dev, u64 mask) | ||
84 | { | ||
85 | if (!dev->dma_mask || !dma_supported(dev, mask)) | ||
86 | return -EIO; | ||
87 | |||
88 | *dev->dma_mask = mask; | ||
89 | |||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static inline int | ||
94 | dma_get_cache_alignment(void) | ||
95 | { | ||
96 | return L2_CACHE_BYTES; | ||
97 | } | ||
98 | |||
99 | #define dma_is_consistent(d, h) (1) | ||
100 | |||
101 | |||
102 | #endif /* _ASM_TILE_DMA_MAPPING_H */ | ||
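A streaming-DMA usage sketch against the declarations above (illustrative only, not from the patch; "dev" and "buf" stand in for a real device and buffer):

/* Hypothetical sketch: the usual map / check / unmap pattern.  On this
 * port dma_mapping_error() currently always reports success, but
 * portable callers check it anyway.
 */
static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... hand "handle" to the device and wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}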
diff --git a/arch/tile/include/asm/dma.h b/arch/tile/include/asm/dma.h new file mode 100644 index 000000000000..12a7ca16d164 --- /dev/null +++ b/arch/tile/include/asm/dma.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_DMA_H | ||
16 | #define _ASM_TILE_DMA_H | ||
17 | |||
18 | #include <asm-generic/dma.h> | ||
19 | |||
20 | /* Needed by drivers/pci/quirks.c */ | ||
21 | #ifdef CONFIG_PCI | ||
22 | extern int isa_dma_bridge_buggy; | ||
23 | #endif | ||
24 | |||
25 | #endif /* _ASM_TILE_DMA_H */ | ||
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h new file mode 100644 index 000000000000..1bca0debdb0f --- /dev/null +++ b/arch/tile/include/asm/elf.h | |||
@@ -0,0 +1,169 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_ELF_H | ||
16 | #define _ASM_TILE_ELF_H | ||
17 | |||
18 | /* | ||
19 | * ELF register definitions. | ||
20 | */ | ||
21 | |||
22 | #include <arch/chip.h> | ||
23 | |||
24 | #include <linux/ptrace.h> | ||
25 | #include <asm/byteorder.h> | ||
26 | #include <asm/page.h> | ||
27 | |||
28 | typedef unsigned long elf_greg_t; | ||
29 | |||
30 | #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) | ||
31 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
32 | |||
33 | #define EM_TILE64 187 | ||
34 | #define EM_TILEPRO 188 | ||
35 | #define EM_TILEGX 191 | ||
36 | |||
37 | /* Provide a nominal data structure. */ | ||
38 | #define ELF_NFPREG 0 | ||
39 | typedef double elf_fpreg_t; | ||
40 | typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | ||
41 | |||
42 | #ifdef __tilegx__ | ||
43 | #define ELF_CLASS ELFCLASS64 | ||
44 | #else | ||
45 | #define ELF_CLASS ELFCLASS32 | ||
46 | #endif | ||
47 | #define ELF_DATA ELFDATA2LSB | ||
48 | |||
49 | /* | ||
50 | * There seems to be a bug in how compat_binfmt_elf.c works: it | ||
51 | * #undefs ELF_ARCH, but it is then used in binfmt_elf.c for fill_note_info(). | ||
52 | * Hack around this by providing an enum value of ELF_ARCH. | ||
53 | */ | ||
54 | enum { ELF_ARCH = CHIP_ELF_TYPE() }; | ||
55 | #define ELF_ARCH ELF_ARCH | ||
56 | |||
57 | /* | ||
58 | * This is used to ensure we don't load something for the wrong architecture. | ||
59 | */ | ||
60 | #define elf_check_arch(x) \ | ||
61 | ((x)->e_ident[EI_CLASS] == ELF_CLASS && \ | ||
62 | ((x)->e_machine == CHIP_ELF_TYPE() || \ | ||
63 | (x)->e_machine == CHIP_COMPAT_ELF_TYPE())) | ||
64 | |||
65 | /* The module loader only handles a few relocation types. */ | ||
66 | #ifndef __tilegx__ | ||
67 | #define R_TILE_32 1 | ||
68 | #define R_TILE_JOFFLONG_X1 15 | ||
69 | #define R_TILE_IMM16_X0_LO 25 | ||
70 | #define R_TILE_IMM16_X1_LO 26 | ||
71 | #define R_TILE_IMM16_X0_HA 29 | ||
72 | #define R_TILE_IMM16_X1_HA 30 | ||
73 | #else | ||
74 | #define R_TILEGX_64 1 | ||
75 | #define R_TILEGX_JUMPOFF_X1 21 | ||
76 | #define R_TILEGX_IMM16_X0_HW0 36 | ||
77 | #define R_TILEGX_IMM16_X1_HW0 37 | ||
78 | #define R_TILEGX_IMM16_X0_HW1 38 | ||
79 | #define R_TILEGX_IMM16_X1_HW1 39 | ||
80 | #define R_TILEGX_IMM16_X0_HW2_LAST 48 | ||
81 | #define R_TILEGX_IMM16_X1_HW2_LAST 49 | ||
82 | #endif | ||
83 | |||
84 | /* Use standard page size for core dumps. */ | ||
85 | #define ELF_EXEC_PAGESIZE PAGE_SIZE | ||
86 | |||
87 | /* | ||
88 | * This is the location that an ET_DYN program is loaded if exec'ed. Typical | ||
89 | * use of this is to invoke "./ld.so someprog" to test out a new version of | ||
90 | * the loader. We need to make sure that it is out of the way of the program | ||
91 | * that it will "exec", and that there is sufficient room for the brk. | ||
92 | */ | ||
93 | #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) | ||
94 | |||
95 | #define ELF_CORE_COPY_REGS(_dest, _regs) \ | ||
96 | memcpy((char *) &_dest, (char *) _regs, \ | ||
97 | sizeof(struct pt_regs)); | ||
98 | |||
99 | /* No additional FP registers to copy. */ | ||
100 | #define ELF_CORE_COPY_FPREGS(t, fpu) 0 | ||
101 | |||
102 | /* | ||
103 | * This yields a mask that user programs can use to figure out what | ||
104 | * instruction set this CPU supports. This could be done in user space, | ||
105 | * but it's not easy, and we've already done it here. | ||
106 | */ | ||
107 | #define ELF_HWCAP (0) | ||
108 | |||
109 | /* | ||
110 | * This yields a string that ld.so will use to load implementation | ||
111 | * specific libraries for optimization. This is more specific in | ||
112 | * intent than poking at uname or /proc/cpuinfo. | ||
113 | */ | ||
114 | #define ELF_PLATFORM (NULL) | ||
115 | |||
116 | extern void elf_plat_init(struct pt_regs *regs, unsigned long load_addr); | ||
117 | |||
118 | #define ELF_PLAT_INIT(_r, load_addr) elf_plat_init(_r, load_addr) | ||
119 | |||
120 | extern int dump_task_regs(struct task_struct *, elf_gregset_t *); | ||
121 | #define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs) | ||
122 | |||
123 | /* Tilera Linux has no personalities currently, so no need to do anything. */ | ||
124 | #define SET_PERSONALITY(ex) do { } while (0) | ||
125 | |||
126 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES | ||
127 | /* Support auto-mapping of the user interrupt vectors. */ | ||
128 | struct linux_binprm; | ||
129 | extern int arch_setup_additional_pages(struct linux_binprm *bprm, | ||
130 | int executable_stack); | ||
131 | #ifdef CONFIG_COMPAT | ||
132 | |||
133 | #define COMPAT_ELF_PLATFORM "tilegx-m32" | ||
134 | |||
135 | /* | ||
136 | * "Compat" binaries have the same machine type, but 32-bit class, | ||
137 | * since they're not a separate machine type, but just a 32-bit | ||
138 | * variant of the standard 64-bit architecture. | ||
139 | */ | ||
140 | #define compat_elf_check_arch(x) \ | ||
141 | ((x)->e_ident[EI_CLASS] == ELFCLASS32 && \ | ||
142 | ((x)->e_machine == CHIP_ELF_TYPE() || \ | ||
143 | (x)->e_machine == CHIP_COMPAT_ELF_TYPE())) | ||
144 | |||
145 | #define compat_start_thread(regs, ip, usp) do { \ | ||
146 | regs->pc = ptr_to_compat_reg((void *)(ip)); \ | ||
147 | regs->sp = ptr_to_compat_reg((void *)(usp)); \ | ||
148 | } while (0) | ||
149 | |||
150 | /* | ||
151 | * Use SET_PERSONALITY to indicate compatibility via TS_COMPAT. | ||
152 | */ | ||
153 | #undef SET_PERSONALITY | ||
154 | #define SET_PERSONALITY(ex) \ | ||
155 | do { \ | ||
156 | current->personality = PER_LINUX; \ | ||
157 | current_thread_info()->status &= ~TS_COMPAT; \ | ||
158 | } while (0) | ||
159 | #define COMPAT_SET_PERSONALITY(ex) \ | ||
160 | do { \ | ||
161 | current->personality = PER_LINUX_32BIT; \ | ||
162 | current_thread_info()->status |= TS_COMPAT; \ | ||
163 | } while (0) | ||
164 | |||
165 | #define COMPAT_ELF_ET_DYN_BASE (0xffffffff / 3 * 2) | ||
166 | |||
167 | #endif /* CONFIG_COMPAT */ | ||
168 | |||
169 | #endif /* _ASM_TILE_ELF_H */ | ||
diff --git a/arch/tile/include/asm/emergency-restart.h b/arch/tile/include/asm/emergency-restart.h new file mode 100644 index 000000000000..3711bd9d50bd --- /dev/null +++ b/arch/tile/include/asm/emergency-restart.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/emergency-restart.h> | |||
diff --git a/arch/tile/include/asm/errno.h b/arch/tile/include/asm/errno.h new file mode 100644 index 000000000000..4c82b503d92f --- /dev/null +++ b/arch/tile/include/asm/errno.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/errno.h> | |||
diff --git a/arch/tile/include/asm/fcntl.h b/arch/tile/include/asm/fcntl.h new file mode 100644 index 000000000000..46ab12db5739 --- /dev/null +++ b/arch/tile/include/asm/fcntl.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/fcntl.h> | |||
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h new file mode 100644 index 000000000000..51537ff9265a --- /dev/null +++ b/arch/tile/include/asm/fixmap.h | |||
@@ -0,0 +1,124 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1998 Ingo Molnar | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_FIXMAP_H | ||
17 | #define _ASM_TILE_FIXMAP_H | ||
18 | |||
19 | #include <asm/page.h> | ||
20 | |||
21 | #ifndef __ASSEMBLY__ | ||
22 | #include <linux/kernel.h> | ||
23 | #ifdef CONFIG_HIGHMEM | ||
24 | #include <linux/threads.h> | ||
25 | #include <asm/kmap_types.h> | ||
26 | #endif | ||
27 | |||
28 | #define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT)) | ||
29 | #define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT) | ||
30 | |||
31 | /* | ||
32 | * Here we define all the compile-time 'special' virtual | ||
33 | * addresses. The point is to have a constant address at | ||
34 | * compile time, but to set the physical address only | ||
35 | * in the boot process. We allocate these special addresses | ||
36 | * from the end of supervisor virtual memory backwards. | ||
37 | * Also this lets us do fail-safe vmalloc(), we | ||
38 | * can guarantee that these special addresses and | ||
39 | * vmalloc()-ed addresses never overlap. | ||
40 | * | ||
41 | * These 'compile-time allocated' memory buffers are | ||
42 | * fixed-size 4k pages (or larger if used with an increment | ||
43 | * higher than 1).  Use set_fixmap(idx, phys) to associate | ||
44 | * physical memory with fixmap indices. | ||
45 | * | ||
46 | * TLB entries of such buffers will not be flushed across | ||
47 | * task switches. | ||
48 | * | ||
49 | * We don't bother with a FIX_HOLE since the memory above the | ||
50 | * fixmaps is unmapped in any case. | ||
51 | */ | ||
52 | enum fixed_addresses { | ||
53 | #ifdef CONFIG_HIGHMEM | ||
54 | FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */ | ||
55 | FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1, | ||
56 | #endif | ||
57 | __end_of_permanent_fixed_addresses, | ||
58 | |||
59 | /* | ||
60 | * Temporary boot-time mappings, used before ioremap() is functional. | ||
61 | * Not currently needed by the Tile architecture. | ||
62 | */ | ||
63 | #define NR_FIX_BTMAPS 0 | ||
64 | #if NR_FIX_BTMAPS | ||
65 | FIX_BTMAP_END = __end_of_permanent_fixed_addresses, | ||
66 | FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1, | ||
67 | __end_of_fixed_addresses | ||
68 | #else | ||
69 | __end_of_fixed_addresses = __end_of_permanent_fixed_addresses | ||
70 | #endif | ||
71 | }; | ||
72 | |||
73 | extern void __set_fixmap(enum fixed_addresses idx, | ||
74 | unsigned long phys, pgprot_t flags); | ||
75 | |||
76 | #define set_fixmap(idx, phys) \ | ||
77 | __set_fixmap(idx, phys, PAGE_KERNEL) | ||
78 | /* | ||
79 | * Some hardware wants to get fixmapped without caching. | ||
80 | */ | ||
81 | #define set_fixmap_nocache(idx, phys) \ | ||
82 | __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) | ||
83 | |||
84 | #define clear_fixmap(idx) \ | ||
85 | __set_fixmap(idx, 0, __pgprot(0)) | ||
86 | |||
87 | #define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) | ||
88 | #define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) | ||
89 | #define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE) | ||
90 | #define FIXADDR_BOOT_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_BOOT_SIZE) | ||
91 | |||
92 | extern void __this_fixmap_does_not_exist(void); | ||
93 | |||
94 | /* | ||
95 | * 'index to address' translation. If anyone tries to use the idx | ||
96 | * directly without translation, we catch the bug with a NULL-dereference | ||
97 | * kernel oops. Illegal ranges of incoming indices are caught too. | ||
98 | */ | ||
99 | static __always_inline unsigned long fix_to_virt(const unsigned int idx) | ||
100 | { | ||
101 | /* | ||
102 | * This branch gets completely eliminated after inlining, | ||
103 | * except when someone tries to use fixaddr indices in an | ||
104 | * illegal way (such as mixing up address types or using | ||
105 | * out-of-range indices). | ||
106 | * | ||
107 | * If it doesn't get removed, the linker will complain | ||
108 | * loudly with a reasonably clear error message. | ||
109 | */ | ||
110 | if (idx >= __end_of_fixed_addresses) | ||
111 | __this_fixmap_does_not_exist(); | ||
112 | |||
113 | return __fix_to_virt(idx); | ||
114 | } | ||
115 | |||
116 | static inline unsigned long virt_to_fix(const unsigned long vaddr) | ||
117 | { | ||
118 | BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); | ||
119 | return __virt_to_fix(vaddr); | ||
120 | } | ||
121 | |||
122 | #endif /* !__ASSEMBLY__ */ | ||
123 | |||
124 | #endif /* _ASM_TILE_FIXMAP_H */ | ||
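For illustration (a sketch, not from the patch): fixmap slots are handed out downward from FIXADDR_TOP, one page per index, so fix_to_virt(idx) is just FIXADDR_TOP - (idx << PAGE_SHIFT). The per-cpu kmap_atomic indexing below assumes CONFIG_HIGHMEM and follows the conventional "type + cpu * KM_TYPE_NR" scheme used by other ports; that scheme is an assumption here, not something this header defines:

/* Hypothetical sketch: compute the fixmap virtual address used for a
 * given cpu's kmap_atomic slot.  An out-of-range constant index would
 * leave an unresolved call to __this_fixmap_does_not_exist().
 */
static unsigned long example_kmap_va(int cpu, enum km_type type)
{
	unsigned int idx = FIX_KMAP_BEGIN + type + cpu * KM_TYPE_NR;

	return fix_to_virt(idx);
}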
diff --git a/arch/tile/include/asm/ftrace.h b/arch/tile/include/asm/ftrace.h new file mode 100644 index 000000000000..461459b06d98 --- /dev/null +++ b/arch/tile/include/asm/ftrace.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_FTRACE_H | ||
16 | #define _ASM_TILE_FTRACE_H | ||
17 | |||
18 | /* empty */ | ||
19 | |||
20 | #endif /* _ASM_TILE_FTRACE_H */ | ||
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h new file mode 100644 index 000000000000..9eaeb3c08786 --- /dev/null +++ b/arch/tile/include/asm/futex.h | |||
@@ -0,0 +1,136 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * These routines make two important assumptions: | ||
15 | * | ||
16 | * 1. atomic_t is really an int and can be freely cast back and forth | ||
17 | * (validated in __init_atomic_per_cpu). | ||
18 | * | ||
19 | * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using | ||
20 | * the same locking convention that all the kernel atomic routines use. | ||
21 | */ | ||
22 | |||
23 | #ifndef _ASM_TILE_FUTEX_H | ||
24 | #define _ASM_TILE_FUTEX_H | ||
25 | |||
26 | #ifndef __ASSEMBLY__ | ||
27 | |||
28 | #include <linux/futex.h> | ||
29 | #include <linux/uaccess.h> | ||
30 | #include <linux/errno.h> | ||
31 | |||
32 | extern struct __get_user futex_set(int *v, int i); | ||
33 | extern struct __get_user futex_add(int *v, int n); | ||
34 | extern struct __get_user futex_or(int *v, int n); | ||
35 | extern struct __get_user futex_andn(int *v, int n); | ||
36 | extern struct __get_user futex_cmpxchg(int *v, int o, int n); | ||
37 | |||
38 | #ifndef __tilegx__ | ||
39 | extern struct __get_user futex_xor(int *v, int n); | ||
40 | #else | ||
41 | static inline struct __get_user futex_xor(int __user *uaddr, int n) | ||
42 | { | ||
43 | struct __get_user asm_ret = __get_user_4(uaddr); | ||
44 | if (!asm_ret.err) { | ||
45 | int oldval, newval; | ||
46 | do { | ||
47 | oldval = asm_ret.val; | ||
48 | newval = oldval ^ n; | ||
49 | asm_ret = futex_cmpxchg(uaddr, oldval, newval); | ||
50 | } while (asm_ret.err == 0 && oldval != asm_ret.val); | ||
51 | } | ||
52 | return asm_ret; | ||
53 | } | ||
54 | #endif | ||
55 | |||
56 | static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | ||
57 | { | ||
58 | int op = (encoded_op >> 28) & 7; | ||
59 | int cmp = (encoded_op >> 24) & 15; | ||
60 | int oparg = (encoded_op << 8) >> 20; | ||
61 | int cmparg = (encoded_op << 20) >> 20; | ||
62 | int ret; | ||
63 | struct __get_user asm_ret; | ||
64 | |||
65 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | ||
66 | oparg = 1 << oparg; | ||
67 | |||
68 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
69 | return -EFAULT; | ||
70 | |||
71 | pagefault_disable(); | ||
72 | switch (op) { | ||
73 | case FUTEX_OP_SET: | ||
74 | asm_ret = futex_set(uaddr, oparg); | ||
75 | break; | ||
76 | case FUTEX_OP_ADD: | ||
77 | asm_ret = futex_add(uaddr, oparg); | ||
78 | break; | ||
79 | case FUTEX_OP_OR: | ||
80 | asm_ret = futex_or(uaddr, oparg); | ||
81 | break; | ||
82 | case FUTEX_OP_ANDN: | ||
83 | asm_ret = futex_andn(uaddr, oparg); | ||
84 | break; | ||
85 | case FUTEX_OP_XOR: | ||
86 | asm_ret = futex_xor(uaddr, oparg); | ||
87 | break; | ||
88 | default: | ||
89 | asm_ret.err = -ENOSYS; | ||
90 | } | ||
91 | pagefault_enable(); | ||
92 | |||
93 | ret = asm_ret.err; | ||
94 | |||
95 | if (!ret) { | ||
96 | switch (cmp) { | ||
97 | case FUTEX_OP_CMP_EQ: | ||
98 | ret = (asm_ret.val == cmparg); | ||
99 | break; | ||
100 | case FUTEX_OP_CMP_NE: | ||
101 | ret = (asm_ret.val != cmparg); | ||
102 | break; | ||
103 | case FUTEX_OP_CMP_LT: | ||
104 | ret = (asm_ret.val < cmparg); | ||
105 | break; | ||
106 | case FUTEX_OP_CMP_GE: | ||
107 | ret = (asm_ret.val >= cmparg); | ||
108 | break; | ||
109 | case FUTEX_OP_CMP_LE: | ||
110 | ret = (asm_ret.val <= cmparg); | ||
111 | break; | ||
112 | case FUTEX_OP_CMP_GT: | ||
113 | ret = (asm_ret.val > cmparg); | ||
114 | break; | ||
115 | default: | ||
116 | ret = -ENOSYS; | ||
117 | } | ||
118 | } | ||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, | ||
123 | int newval) | ||
124 | { | ||
125 | struct __get_user asm_ret; | ||
126 | |||
127 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
128 | return -EFAULT; | ||
129 | |||
130 | asm_ret = futex_cmpxchg(uaddr, oldval, newval); | ||
131 | return asm_ret.err ? asm_ret.err : asm_ret.val; | ||
132 | } | ||
133 | |||
134 | #endif /* !__ASSEMBLY__ */ | ||
135 | |||
136 | #endif /* _ASM_TILE_FUTEX_H */ | ||
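For reference (a sketch, not from the patch): the encoded_op decoded by futex_atomic_op_inuser() is built by the FUTEX_OP() macro from <linux/futex.h>, which packs op into bits 28-31, cmp into bits 24-27, oparg into bits 12-23 and cmparg into bits 0-11; the two 12-bit arguments are sign-extended when extracted above.

/* Hypothetical sketch: atomically add 1 to *uaddr and report whether
 * the old value compared equal to 0 (1 if so, 0 if not, negative on
 * error).
 */
static int example_futex_add(int __user *uaddr)
{
	int encoded = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0);

	return futex_atomic_op_inuser(encoded, uaddr);
}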
diff --git a/arch/tile/include/asm/hardirq.h b/arch/tile/include/asm/hardirq.h new file mode 100644 index 000000000000..822390f9a154 --- /dev/null +++ b/arch/tile/include/asm/hardirq.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HARDIRQ_H | ||
16 | #define _ASM_TILE_HARDIRQ_H | ||
17 | |||
18 | #include <linux/threads.h> | ||
19 | #include <linux/cache.h> | ||
20 | |||
21 | #include <asm/irq.h> | ||
22 | |||
23 | typedef struct { | ||
24 | unsigned int __softirq_pending; | ||
25 | long idle_timestamp; | ||
26 | |||
27 | /* Hard interrupt statistics. */ | ||
28 | unsigned int irq_timer_count; | ||
29 | unsigned int irq_syscall_count; | ||
30 | unsigned int irq_resched_count; | ||
31 | unsigned int irq_hv_flush_count; | ||
32 | unsigned int irq_call_count; | ||
33 | unsigned int irq_hv_msg_count; | ||
34 | unsigned int irq_dev_intr_count; | ||
35 | |||
36 | } ____cacheline_aligned irq_cpustat_t; | ||
37 | |||
38 | DECLARE_PER_CPU(irq_cpustat_t, irq_stat); | ||
39 | |||
40 | #define __ARCH_IRQ_STAT | ||
41 | #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member) | ||
42 | |||
43 | #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ | ||
44 | |||
45 | #define HARDIRQ_BITS 8 | ||
46 | |||
47 | #endif /* _ASM_TILE_HARDIRQ_H */ | ||
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h new file mode 100644 index 000000000000..efdd12e91020 --- /dev/null +++ b/arch/tile/include/asm/highmem.h | |||
@@ -0,0 +1,73 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1999 Gerhard Wichert, Siemens AG | ||
3 | * Gerhard.Wichert@pdb.siemens.de | ||
4 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation, version 2. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, but | ||
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
13 | * NON INFRINGEMENT. See the GNU General Public License for | ||
14 | * more details. | ||
15 | * | ||
16 | * Used in CONFIG_HIGHMEM systems for memory pages which | ||
17 | * are not addressable by direct kernel virtual addresses. | ||
18 | * | ||
19 | */ | ||
20 | |||
21 | #ifndef _ASM_TILE_HIGHMEM_H | ||
22 | #define _ASM_TILE_HIGHMEM_H | ||
23 | |||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/threads.h> | ||
26 | #include <asm/kmap_types.h> | ||
27 | #include <asm/tlbflush.h> | ||
28 | #include <asm/homecache.h> | ||
29 | |||
30 | /* declarations for highmem.c */ | ||
31 | extern unsigned long highstart_pfn, highend_pfn; | ||
32 | |||
33 | extern pte_t *pkmap_page_table; | ||
34 | |||
35 | /* | ||
36 | * Ordering is: | ||
37 | * | ||
38 | * FIXADDR_TOP | ||
39 | * fixed_addresses | ||
40 | * FIXADDR_START | ||
41 | * temp fixed addresses | ||
42 | * FIXADDR_BOOT_START | ||
43 | * Persistent kmap area | ||
44 | * PKMAP_BASE | ||
45 | * VMALLOC_END | ||
46 | * Vmalloc area | ||
47 | * VMALLOC_START | ||
48 | * high_memory | ||
49 | */ | ||
50 | #define LAST_PKMAP_MASK (LAST_PKMAP-1) | ||
51 | #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT) | ||
52 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) | ||
53 | |||
54 | void *kmap_high(struct page *page); | ||
55 | void kunmap_high(struct page *page); | ||
56 | void *kmap(struct page *page); | ||
57 | void kunmap(struct page *page); | ||
58 | void *kmap_fix_kpte(struct page *page, int finished); | ||
59 | |||
60 | /* This macro is used only in map_new_virtual() to map "page". */ | ||
61 | #define kmap_prot page_to_kpgprot(page) | ||
62 | |||
63 | void kunmap_atomic(void *kvaddr, enum km_type type); | ||
64 | void *kmap_atomic_pfn(unsigned long pfn, enum km_type type); | ||
65 | void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); | ||
66 | struct page *kmap_atomic_to_page(void *ptr); | ||
67 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot); | ||
68 | void *kmap_atomic(struct page *page, enum km_type type); | ||
69 | void kmap_atomic_fix_kpte(struct page *page, int finished); | ||
70 | |||
71 | #define flush_cache_kmaps() do { } while (0) | ||
72 | |||
73 | #endif /* _ASM_TILE_HIGHMEM_H */ | ||
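A usage sketch for the kmap interfaces above (illustrative only, not from the patch; KM_USER0 is the conventional slot name from the generic kmap_types enum and is an assumption here):

/* Hypothetical sketch: kmap() may sleep, so atomic context uses the
 * kmap_atomic()/kunmap_atomic() pair and must not sleep in between.
 */
static void example_zero_page(struct page *page)
{
	void *va = kmap_atomic(page, KM_USER0);

	memset(va, 0, PAGE_SIZE);
	kunmap_atomic(va, KM_USER0);
}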
diff --git a/arch/tile/include/asm/homecache.h b/arch/tile/include/asm/homecache.h new file mode 100644 index 000000000000..a8243865d49e --- /dev/null +++ b/arch/tile/include/asm/homecache.h | |||
@@ -0,0 +1,125 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * Handle issues around the Tile "home cache" model of coherence. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_HOMECACHE_H | ||
18 | #define _ASM_TILE_HOMECACHE_H | ||
19 | |||
20 | #include <asm/page.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | |||
23 | struct page; | ||
24 | struct task_struct; | ||
25 | struct vm_area_struct; | ||
26 | struct zone; | ||
27 | |||
28 | /* | ||
29 | * Coherence point for the page is its memory controller. | ||
30 | * It is not present in any cache (L1 or L2). | ||
31 | */ | ||
32 | #define PAGE_HOME_UNCACHED -1 | ||
33 | |||
34 | /* | ||
35 | * Is this page immutable (unwritable) and thus able to be cached more | ||
36 | * widely than would otherwise be possible? On tile64 this means we | ||
37 | * mark the PTE to cache locally; on tilepro it means we have "nc" set. | ||
38 | */ | ||
39 | #define PAGE_HOME_IMMUTABLE -2 | ||
40 | |||
41 | /* | ||
42 | * Each cpu considers its own cache to be the home for the page, | ||
43 | * which makes it incoherent. | ||
44 | */ | ||
45 | #define PAGE_HOME_INCOHERENT -3 | ||
46 | |||
47 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
48 | /* Home for the page is distributed via hash-for-home. */ | ||
49 | #define PAGE_HOME_HASH -4 | ||
50 | #endif | ||
51 | |||
52 | /* Homing is unknown or unspecified. Not valid for page_home(). */ | ||
53 | #define PAGE_HOME_UNKNOWN -5 | ||
54 | |||
55 | /* Home on the current cpu. Not valid for page_home(). */ | ||
56 | #define PAGE_HOME_HERE -6 | ||
57 | |||
58 | /* Support wrapper to use instead of explicit hv_flush_remote(). */ | ||
59 | extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length, | ||
60 | const struct cpumask *cache_cpumask, | ||
61 | HV_VirtAddr tlb_va, unsigned long tlb_length, | ||
62 | unsigned long tlb_pgsize, | ||
63 | const struct cpumask *tlb_cpumask, | ||
64 | HV_Remote_ASID *asids, int asidcount); | ||
65 | |||
66 | /* Set homing-related bits in a PTE (can also pass a pgprot_t). */ | ||
67 | extern pte_t pte_set_home(pte_t pte, int home); | ||
68 | |||
69 | /* Do a cache eviction on the specified cpus. */ | ||
70 | extern void homecache_evict(const struct cpumask *mask); | ||
71 | |||
72 | /* | ||
73 | * Change a kernel page's homecache. It must not be mapped in user space. | ||
74 | * If !CONFIG_HOMECACHE, only usable on LOWMEM, and can only be called when | ||
75 | * no other cpu can reference the page, and causes a full-chip cache/TLB flush. | ||
76 | */ | ||
77 | extern void homecache_change_page_home(struct page *, int order, int home); | ||
78 | |||
79 | /* | ||
80 | * Flush a page out of whatever cache(s) it is in. | ||
81 | * This is more than just finv, since it properly handles waiting | ||
82 | * for the data to reach memory on tilepro, but it can be quite | ||
83 | * heavyweight, particularly on hash-for-home memory. | ||
84 | */ | ||
85 | extern void homecache_flush_cache(struct page *, int order); | ||
86 | |||
87 | /* | ||
88 | * Allocate a page with the given GFP flags, home, and optionally | ||
89 | * node. These routines are actually just wrappers around the normal | ||
90 | * alloc_pages() / alloc_pages_node() functions; they set and clear | ||
91 | * a per-cpu variable to communicate with homecache_new_kernel_page(). | ||
92 | * If !CONFIG_HOMECACHE, uses homecache_change_page_home(). | ||
93 | */ | ||
94 | extern struct page *homecache_alloc_pages(gfp_t gfp_mask, | ||
95 | unsigned int order, int home); | ||
96 | extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask, | ||
97 | unsigned int order, int home); | ||
98 | #define homecache_alloc_page(gfp_mask, home) \ | ||
99 | homecache_alloc_pages(gfp_mask, 0, home) | ||
100 | |||
101 | /* | ||
102 | * These routines are just pass-throughs to free_pages() when | ||
103 | * we support full homecaching. If !CONFIG_HOMECACHE, then these | ||
104 | * routines use homecache_change_page_home() to reset the home | ||
105 | * back to the default before returning the page to the allocator. | ||
106 | */ | ||
107 | void homecache_free_pages(unsigned long addr, unsigned int order); | ||
108 | #define homecache_free_page(page) \ | ||
109 | homecache_free_pages((page), 0) | ||
110 | |||
111 | |||
112 | |||
113 | /* | ||
114 | * Report the page home for LOWMEM pages by examining their kernel PTE, | ||
115 | * or for highmem pages as the default home. | ||
116 | */ | ||
117 | extern int page_home(struct page *); | ||
118 | |||
119 | #define homecache_migrate_kthread() do {} while (0) | ||
120 | |||
121 | #define homecache_kpte_lock() 0 | ||
122 | #define homecache_kpte_unlock(flags) do {} while (0) | ||
123 | |||
124 | |||
125 | #endif /* _ASM_TILE_HOMECACHE_H */ | ||
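A small usage sketch for the allocator wrappers above (illustrative only, not from the patch; passing PAGE_HOME_HERE at allocation time is an assumption based on the comment that it is invalid only for page_home()):

/* Hypothetical sketch: get a kernel page homed on the current cpu's
 * cache, use it, and hand it back; the free path restores the default
 * home when !CONFIG_HOMECACHE.
 */
static void example_local_page(void)
{
	struct page *page = homecache_alloc_page(GFP_KERNEL, PAGE_HOME_HERE);

	if (!page)
		return;
	/* ... touch the page via page_address(page) ... */
	homecache_free_pages((unsigned long)page_address(page), 0);
}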
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h new file mode 100644 index 000000000000..0521c277bbde --- /dev/null +++ b/arch/tile/include/asm/hugetlb.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HUGETLB_H | ||
16 | #define _ASM_TILE_HUGETLB_H | ||
17 | |||
18 | #include <asm/page.h> | ||
19 | |||
20 | |||
21 | static inline int is_hugepage_only_range(struct mm_struct *mm, | ||
22 | unsigned long addr, | ||
23 | unsigned long len) { | ||
24 | return 0; | ||
25 | } | ||
26 | |||
27 | /* | ||
28 | * If the arch doesn't supply something else, assume that hugepage | ||
29 | * size aligned regions are ok without further preparation. | ||
30 | */ | ||
31 | static inline int prepare_hugepage_range(struct file *file, | ||
32 | unsigned long addr, unsigned long len) | ||
33 | { | ||
34 | struct hstate *h = hstate_file(file); | ||
35 | if (len & ~huge_page_mask(h)) | ||
36 | return -EINVAL; | ||
37 | if (addr & ~huge_page_mask(h)) | ||
38 | return -EINVAL; | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) | ||
43 | { | ||
44 | } | ||
45 | |||
46 | static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, | ||
47 | unsigned long addr, unsigned long end, | ||
48 | unsigned long floor, | ||
49 | unsigned long ceiling) | ||
50 | { | ||
51 | free_pgd_range(tlb, addr, end, floor, ceiling); | ||
52 | } | ||
53 | |||
54 | static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | ||
55 | pte_t *ptep, pte_t pte) | ||
56 | { | ||
57 | set_pte_order(ptep, pte, HUGETLB_PAGE_ORDER); | ||
58 | } | ||
59 | |||
60 | static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | ||
61 | unsigned long addr, pte_t *ptep) | ||
62 | { | ||
63 | return ptep_get_and_clear(mm, addr, ptep); | ||
64 | } | ||
65 | |||
66 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | ||
67 | unsigned long addr, pte_t *ptep) | ||
68 | { | ||
69 | ptep_clear_flush(vma, addr, ptep); | ||
70 | } | ||
71 | |||
72 | static inline int huge_pte_none(pte_t pte) | ||
73 | { | ||
74 | return pte_none(pte); | ||
75 | } | ||
76 | |||
77 | static inline pte_t huge_pte_wrprotect(pte_t pte) | ||
78 | { | ||
79 | return pte_wrprotect(pte); | ||
80 | } | ||
81 | |||
82 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | ||
83 | unsigned long addr, pte_t *ptep) | ||
84 | { | ||
85 | ptep_set_wrprotect(mm, addr, ptep); | ||
86 | } | ||
87 | |||
88 | static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, | ||
89 | unsigned long addr, pte_t *ptep, | ||
90 | pte_t pte, int dirty) | ||
91 | { | ||
92 | return ptep_set_access_flags(vma, addr, ptep, pte, dirty); | ||
93 | } | ||
94 | |||
95 | static inline pte_t huge_ptep_get(pte_t *ptep) | ||
96 | { | ||
97 | return *ptep; | ||
98 | } | ||
99 | |||
100 | static inline int arch_prepare_hugepage(struct page *page) | ||
101 | { | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static inline void arch_release_hugepage(struct page *page) | ||
106 | { | ||
107 | } | ||
108 | |||
109 | #endif /* _ASM_TILE_HUGETLB_H */ | ||
diff --git a/arch/tile/include/asm/hv_driver.h b/arch/tile/include/asm/hv_driver.h new file mode 100644 index 000000000000..ad614de899b3 --- /dev/null +++ b/arch/tile/include/asm/hv_driver.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This header defines a wrapper interface for managing hypervisor | ||
15 | * device calls that will result in an interrupt at some later time. | ||
16 | * In particular, this provides wrappers for hv_dev_preada() and | ||
17 | * hv_dev_pwritea(). | ||
18 | */ | ||
19 | |||
20 | #ifndef _ASM_TILE_HV_DRIVER_H | ||
21 | #define _ASM_TILE_HV_DRIVER_H | ||
22 | |||
23 | #include <hv/hypervisor.h> | ||
24 | |||
25 | struct hv_driver_cb; | ||
26 | |||
27 | /* A callback to be invoked when an operation completes. */ | ||
28 | typedef void hv_driver_callback_t(struct hv_driver_cb *cb, __hv32 result); | ||
29 | |||
30 | /* | ||
31 | * A structure to hold information about an outstanding call. | ||
32 | * The driver must allocate a separate structure for each call. | ||
33 | */ | ||
34 | struct hv_driver_cb { | ||
35 | hv_driver_callback_t *callback; /* Function to call on interrupt. */ | ||
36 | void *dev; /* Driver-specific state variable. */ | ||
37 | }; | ||
38 | |||
39 | /* Wrapper for invoking hv_dev_preada(). */ | ||
40 | static inline int | ||
41 | tile_hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
42 | HV_SGL sgl[/* sgl_len */], __hv64 offset, | ||
43 | struct hv_driver_cb *callback) | ||
44 | { | ||
45 | return hv_dev_preada(devhdl, flags, sgl_len, sgl, | ||
46 | offset, (HV_IntArg)callback); | ||
47 | } | ||
48 | |||
49 | /* Wrapper for invoking hv_dev_pwritea(). */ | ||
50 | static inline int | ||
51 | tile_hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
52 | HV_SGL sgl[/* sgl_len */], __hv64 offset, | ||
53 | struct hv_driver_cb *callback) | ||
54 | { | ||
55 | return hv_dev_pwritea(devhdl, flags, sgl_len, sgl, | ||
56 | offset, (HV_IntArg)callback); | ||
57 | } | ||
58 | |||
59 | |||
60 | #endif /* _ASM_TILE_HV_DRIVER_H */ | ||
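A minimal sketch of how a driver might use these wrappers to issue an asynchronous read. The device handle, scatter/gather list, and the my_dev/my_read_done names are hypothetical; opening the device and dispatching the completion callback from the interrupt path are assumed to happen elsewhere.

#include <asm/hv_driver.h>
#include <hv/hypervisor.h>

struct my_dev {
	struct hv_driver_cb cb;		/* completion bookkeeping for one call */
	/* ... other driver state ... */
};

/* Invoked once the hypervisor reports the read complete. */
static void my_read_done(struct hv_driver_cb *cb, __hv32 result)
{
	struct my_dev *dev = cb->dev;
	/* ... hand "result" and the filled buffers back to the driver ... */
	(void)dev;
}

/* Kick off an asynchronous read at "offset" into the caller's SGL. */
static int my_start_read(struct my_dev *dev, int devhdl,
			 HV_SGL sgl[], __hv32 sgl_len, __hv64 offset)
{
	dev->cb.callback = my_read_done;
	dev->cb.dev = dev;
	return tile_hv_dev_preada(devhdl, 0, sgl_len, sgl, offset, &dev->cb);
}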
diff --git a/arch/tile/include/asm/hw_irq.h b/arch/tile/include/asm/hw_irq.h new file mode 100644 index 000000000000..4fac5fbf333e --- /dev/null +++ b/arch/tile/include/asm/hw_irq.h | |||
@@ -0,0 +1,18 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_HW_IRQ_H | ||
16 | #define _ASM_TILE_HW_IRQ_H | ||
17 | |||
18 | #endif /* _ASM_TILE_HW_IRQ_H */ | ||
diff --git a/arch/tile/include/asm/ide.h b/arch/tile/include/asm/ide.h new file mode 100644 index 000000000000..3c6f2ed894ce --- /dev/null +++ b/arch/tile/include/asm/ide.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IDE_H | ||
16 | #define _ASM_TILE_IDE_H | ||
17 | |||
18 | /* For IDE on PCI */ | ||
19 | #define MAX_HWIFS 10 | ||
20 | |||
21 | #define ide_default_io_ctl(base) (0) | ||
22 | |||
23 | #include <asm-generic/ide_iops.h> | ||
24 | |||
25 | #endif /* _ASM_TILE_IDE_H */ | ||
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h new file mode 100644 index 000000000000..8c95bef3fa45 --- /dev/null +++ b/arch/tile/include/asm/io.h | |||
@@ -0,0 +1,279 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IO_H | ||
16 | #define _ASM_TILE_IO_H | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/bug.h> | ||
20 | #include <asm/page.h> | ||
21 | |||
22 | #define IO_SPACE_LIMIT 0xfffffffful | ||
23 | |||
24 | /* | ||
25 | * Convert a physical pointer to a virtual kernel pointer for /dev/mem | ||
26 | * access. | ||
27 | */ | ||
28 | #define xlate_dev_mem_ptr(p) __va(p) | ||
29 | |||
30 | /* | ||
31 | * Convert a virtual cached pointer to an uncached pointer. | ||
32 | */ | ||
33 | #define xlate_dev_kmem_ptr(p) p | ||
34 | |||
35 | /* | ||
36 | * Change "struct page" to physical address. | ||
37 | */ | ||
38 | #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) | ||
39 | |||
40 | /* | ||
41 | * Some places try to pass in an loff_t for PHYSADDR (?!), so we cast it to | ||
42 | * long before casting it to a pointer to avoid compiler warnings. | ||
43 | */ | ||
44 | #if CHIP_HAS_MMIO() | ||
45 | extern void __iomem *ioremap(resource_size_t offset, unsigned long size); | ||
46 | extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, | ||
47 | pgprot_t pgprot); | ||
48 | extern void iounmap(volatile void __iomem *addr); | ||
49 | #else | ||
50 | #define ioremap(physaddr, size) ((void __iomem *)(unsigned long)(physaddr)) | ||
51 | #define iounmap(addr) ((void)0) | ||
52 | #endif | ||
53 | |||
54 | #define ioremap_nocache(physaddr, size) ioremap(physaddr, size) | ||
55 | #define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) | ||
56 | #define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) | ||
57 | |||
58 | void __iomem *ioport_map(unsigned long port, unsigned int len); | ||
59 | static inline void ioport_unmap(void __iomem *addr) {} | ||
60 | |||
61 | #define mmiowb() | ||
62 | |||
63 | /* Conversion between virtual and physical mappings. */ | ||
64 | #define mm_ptov(addr) ((void *)phys_to_virt(addr)) | ||
65 | #define mm_vtop(addr) ((unsigned long)virt_to_phys(addr)) | ||
66 | |||
67 | #ifdef CONFIG_PCI | ||
68 | |||
69 | extern u8 _tile_readb(unsigned long addr); | ||
70 | extern u16 _tile_readw(unsigned long addr); | ||
71 | extern u32 _tile_readl(unsigned long addr); | ||
72 | extern u64 _tile_readq(unsigned long addr); | ||
73 | extern void _tile_writeb(u8 val, unsigned long addr); | ||
74 | extern void _tile_writew(u16 val, unsigned long addr); | ||
75 | extern void _tile_writel(u32 val, unsigned long addr); | ||
76 | extern void _tile_writeq(u64 val, unsigned long addr); | ||
77 | |||
78 | #else | ||
79 | |||
80 | /* | ||
81 | * The Tile architecture does not support IOMEM unless PCI is enabled. | ||
82 | * Unfortunately we can't yet simply avoid declaring these methods, | ||
83 | * since some generic code that is compiled into the kernel, but | ||
84 | * that we never run, uses them unconditionally. | ||
85 | */ | ||
86 | |||
87 | static inline int iomem_panic(void) | ||
88 | { | ||
89 | panic("readb/writeb and friends do not exist on tile without PCI"); | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | static inline u8 _tile_readb(unsigned long addr) | ||
94 | { | ||
95 | return iomem_panic(); | ||
96 | } | ||
97 | |||
98 | static inline u16 _tile_readw(unsigned long addr) | ||
99 | { | ||
100 | return iomem_panic(); | ||
101 | } | ||
102 | |||
103 | static inline u32 _tile_readl(unsigned long addr) | ||
104 | { | ||
105 | return iomem_panic(); | ||
106 | } | ||
107 | |||
108 | static inline u64 _tile_readq(unsigned long addr) | ||
109 | { | ||
110 | return iomem_panic(); | ||
111 | } | ||
112 | |||
113 | static inline void _tile_writeb(u8 val, unsigned long addr) | ||
114 | { | ||
115 | iomem_panic(); | ||
116 | } | ||
117 | |||
118 | static inline void _tile_writew(u16 val, unsigned long addr) | ||
119 | { | ||
120 | iomem_panic(); | ||
121 | } | ||
122 | |||
123 | static inline void _tile_writel(u32 val, unsigned long addr) | ||
124 | { | ||
125 | iomem_panic(); | ||
126 | } | ||
127 | |||
128 | static inline void _tile_writeq(u64 val, unsigned long addr) | ||
129 | { | ||
130 | iomem_panic(); | ||
131 | } | ||
132 | |||
133 | #endif | ||
134 | |||
135 | #define readb(addr) _tile_readb((unsigned long)(addr)) | ||
136 | #define readw(addr) _tile_readw((unsigned long)(addr)) | ||
137 | #define readl(addr) _tile_readl((unsigned long)(addr)) | ||
138 | #define readq(addr) _tile_readq((unsigned long)(addr)) | ||
139 | #define writeb(val, addr) _tile_writeb((val), (unsigned long)(addr)) | ||
140 | #define writew(val, addr) _tile_writew((val), (unsigned long)(addr)) | ||
141 | #define writel(val, addr) _tile_writel((val), (unsigned long)(addr)) | ||
142 | #define writeq(val, addr) _tile_writeq((val), (unsigned long)(addr)) | ||
143 | |||
144 | #define __raw_readb readb | ||
145 | #define __raw_readw readw | ||
146 | #define __raw_readl readl | ||
147 | #define __raw_readq readq | ||
148 | #define __raw_writeb writeb | ||
149 | #define __raw_writew writew | ||
150 | #define __raw_writel writel | ||
151 | #define __raw_writeq writeq | ||
152 | |||
153 | #define readb_relaxed readb | ||
154 | #define readw_relaxed readw | ||
155 | #define readl_relaxed readl | ||
156 | #define readq_relaxed readq | ||
157 | |||
158 | #define ioread8 readb | ||
159 | #define ioread16 readw | ||
160 | #define ioread32 readl | ||
161 | #define ioread64 readq | ||
162 | #define iowrite8 writeb | ||
163 | #define iowrite16 writew | ||
164 | #define iowrite32 writel | ||
165 | #define iowrite64 writeq | ||
166 | |||
167 | static inline void *memcpy_fromio(void *dst, void *src, int len) | ||
168 | { | ||
169 | int x; | ||
170 | BUG_ON((unsigned long)src & 0x3); | ||
171 | for (x = 0; x < len; x += 4) | ||
172 | *(u32 *)(dst + x) = readl(src + x); | ||
173 | return dst; | ||
174 | } | ||
175 | |||
176 | static inline void *memcpy_toio(void *dst, void *src, int len) | ||
177 | { | ||
178 | int x; | ||
179 | BUG_ON((unsigned long)dst & 0x3); | ||
180 | for (x = 0; x < len; x += 4) | ||
181 | writel(*(u32 *)(src + x), dst + x); | ||
182 | return dst; | ||
183 | } | ||
184 | |||
185 | /* | ||
186 | * The Tile architecture does not support IOPORT, even with PCI. | ||
187 | * Unfortunately we can't yet simply avoid declaring these methods, | ||
188 | * since some generic code that is compiled into the kernel, but | ||
189 | * that we never run, uses them unconditionally. | ||
190 | */ | ||
191 | |||
192 | static inline int ioport_panic(void) | ||
193 | { | ||
194 | panic("inb/outb and friends do not exist on tile"); | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static inline u8 inb(unsigned long addr) | ||
199 | { | ||
200 | return ioport_panic(); | ||
201 | } | ||
202 | |||
203 | static inline u16 inw(unsigned long addr) | ||
204 | { | ||
205 | return ioport_panic(); | ||
206 | } | ||
207 | |||
208 | static inline u32 inl(unsigned long addr) | ||
209 | { | ||
210 | return ioport_panic(); | ||
211 | } | ||
212 | |||
213 | static inline void outb(u8 b, unsigned long addr) | ||
214 | { | ||
215 | ioport_panic(); | ||
216 | } | ||
217 | |||
218 | static inline void outw(u16 b, unsigned long addr) | ||
219 | { | ||
220 | ioport_panic(); | ||
221 | } | ||
222 | |||
223 | static inline void outl(u32 b, unsigned long addr) | ||
224 | { | ||
225 | ioport_panic(); | ||
226 | } | ||
227 | |||
228 | #define inb_p(addr) inb(addr) | ||
229 | #define inw_p(addr) inw(addr) | ||
230 | #define inl_p(addr) inl(addr) | ||
231 | #define outb_p(x, addr) outb((x), (addr)) | ||
232 | #define outw_p(x, addr) outw((x), (addr)) | ||
233 | #define outl_p(x, addr) outl((x), (addr)) | ||
234 | |||
235 | static inline void insb(unsigned long addr, void *buffer, int count) | ||
236 | { | ||
237 | ioport_panic(); | ||
238 | } | ||
239 | |||
240 | static inline void insw(unsigned long addr, void *buffer, int count) | ||
241 | { | ||
242 | ioport_panic(); | ||
243 | } | ||
244 | |||
245 | static inline void insl(unsigned long addr, void *buffer, int count) | ||
246 | { | ||
247 | ioport_panic(); | ||
248 | } | ||
249 | |||
250 | static inline void outsb(unsigned long addr, const void *buffer, int count) | ||
251 | { | ||
252 | ioport_panic(); | ||
253 | } | ||
254 | |||
255 | static inline void outsw(unsigned long addr, const void *buffer, int count) | ||
256 | { | ||
257 | ioport_panic(); | ||
258 | } | ||
259 | |||
260 | static inline void outsl(unsigned long addr, const void *buffer, int count) | ||
261 | { | ||
262 | ioport_panic(); | ||
263 | } | ||
264 | |||
265 | #define ioread8_rep(p, dst, count) \ | ||
266 | insb((unsigned long) (p), (dst), (count)) | ||
267 | #define ioread16_rep(p, dst, count) \ | ||
268 | insw((unsigned long) (p), (dst), (count)) | ||
269 | #define ioread32_rep(p, dst, count) \ | ||
270 | insl((unsigned long) (p), (dst), (count)) | ||
271 | |||
272 | #define iowrite8_rep(p, src, count) \ | ||
273 | outsb((unsigned long) (p), (src), (count)) | ||
274 | #define iowrite16_rep(p, src, count) \ | ||
275 | outsw((unsigned long) (p), (src), (count)) | ||
276 | #define iowrite32_rep(p, src, count) \ | ||
277 | outsl((unsigned long) (p), (src), (count)) | ||
278 | |||
279 | #endif /* _ASM_TILE_IO_H */ | ||
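A minimal sketch of the MMIO path above, assuming CONFIG_PCI is enabled (otherwise readl()/writel() panic as shown); the BAR physical address, register offset, and function name are hypothetical.

#include <asm/io.h>

/* Map a device's register window, set a control bit, and unmap again. */
static u32 set_ctrl_bit(resource_size_t bar_phys, unsigned long reg_off)
{
	void __iomem *regs = ioremap(bar_phys, 0x1000);
	u32 old;

	if (!regs)
		return 0;
	old = readl(regs + reg_off);
	writel(old | 0x1, regs + reg_off);
	iounmap(regs);
	return old;
}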
diff --git a/arch/tile/include/asm/ioctl.h b/arch/tile/include/asm/ioctl.h new file mode 100644 index 000000000000..b279fe06dfe5 --- /dev/null +++ b/arch/tile/include/asm/ioctl.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ioctl.h> | |||
diff --git a/arch/tile/include/asm/ioctls.h b/arch/tile/include/asm/ioctls.h new file mode 100644 index 000000000000..ec34c760665e --- /dev/null +++ b/arch/tile/include/asm/ioctls.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ioctls.h> | |||
diff --git a/arch/tile/include/asm/ipc.h b/arch/tile/include/asm/ipc.h new file mode 100644 index 000000000000..a46e3d9c2a3f --- /dev/null +++ b/arch/tile/include/asm/ipc.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ipc.h> | |||
diff --git a/arch/tile/include/asm/ipcbuf.h b/arch/tile/include/asm/ipcbuf.h new file mode 100644 index 000000000000..84c7e51cb6d0 --- /dev/null +++ b/arch/tile/include/asm/ipcbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ipcbuf.h> | |||
diff --git a/arch/tile/include/asm/irq.h b/arch/tile/include/asm/irq.h new file mode 100644 index 000000000000..9be1f849fac9 --- /dev/null +++ b/arch/tile/include/asm/irq.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IRQ_H | ||
16 | #define _ASM_TILE_IRQ_H | ||
17 | |||
18 | #include <linux/hardirq.h> | ||
19 | |||
20 | /* The hypervisor interface provides 32 IRQs. */ | ||
21 | #define NR_IRQS 32 | ||
22 | |||
23 | /* IRQ numbers used for linux IPIs. */ | ||
24 | #define IRQ_RESCHEDULE 1 | ||
25 | |||
26 | /* The HV interrupt state object. */ | ||
27 | DECLARE_PER_CPU(HV_IntrState, dev_intr_state); | ||
28 | |||
29 | void ack_bad_irq(unsigned int irq); | ||
30 | |||
31 | /* | ||
32 | * Paravirtualized drivers should call this when their init calls | ||
33 | * discover a valid HV IRQ. | ||
34 | */ | ||
35 | void tile_irq_activate(unsigned int irq); | ||
36 | |||
37 | #endif /* _ASM_TILE_IRQ_H */ | ||
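A minimal sketch of the paravirtualized-driver flow described above; the IRQ number, device name, and the ordering relative to request_irq() are assumptions made for illustration only.

#include <linux/interrupt.h>
#include <asm/irq.h>

static irqreturn_t my_hv_irq_handler(int irq, void *dev_id)
{
	/* ... acknowledge and service the (hypothetical) device ... */
	return IRQ_HANDLED;
}

static int my_driver_init_irq(unsigned int hv_irq, void *dev)
{
	int rc = request_irq(hv_irq, my_hv_irq_handler, 0, "my_hv_dev", dev);

	if (rc)
		return rc;
	tile_irq_activate(hv_irq);	/* tell the arch code this HV IRQ is live */
	return 0;
}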
diff --git a/arch/tile/include/asm/irq_regs.h b/arch/tile/include/asm/irq_regs.h new file mode 100644 index 000000000000..3dd9c0b70270 --- /dev/null +++ b/arch/tile/include/asm/irq_regs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/irq_regs.h> | |||
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h new file mode 100644 index 000000000000..cf5bffd00fef --- /dev/null +++ b/arch/tile/include/asm/irqflags.h | |||
@@ -0,0 +1,267 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_IRQFLAGS_H | ||
16 | #define _ASM_TILE_IRQFLAGS_H | ||
17 | |||
18 | #include <asm/processor.h> | ||
19 | #include <arch/interrupts.h> | ||
20 | #include <arch/chip.h> | ||
21 | |||
22 | /* | ||
23 | * The set of interrupts we mask out when interrupts are nominally | ||
24 | * disabled. The remaining (unmasked) interrupts are effectively "NMI" | ||
25 | * interrupts from the point of view of the generic Linux code. Note that | ||
26 | * synchronous interrupts (aka "non-queued") are not blocked by the mask in any case. | ||
27 | */ | ||
28 | #if CHIP_HAS_AUX_PERF_COUNTERS() | ||
29 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
30 | (~(INT_MASK(INT_PERF_COUNT) | INT_MASK(INT_AUX_PERF_COUNT))) | ||
31 | #else | ||
32 | #define LINUX_MASKABLE_INTERRUPTS \ | ||
33 | (~(INT_MASK(INT_PERF_COUNT))) | ||
34 | #endif | ||
35 | |||
36 | #ifndef __ASSEMBLY__ | ||
37 | |||
38 | /* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */ | ||
39 | #include <asm/percpu.h> | ||
40 | #include <arch/spr_def.h> | ||
41 | |||
42 | /* Set and clear kernel interrupt masks. */ | ||
43 | #if CHIP_HAS_SPLIT_INTR_MASK() | ||
44 | #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 | ||
45 | # error Fix assumptions about which word various interrupts are in | ||
46 | #endif | ||
47 | #define interrupt_mask_set(n) do { \ | ||
48 | int __n = (n); \ | ||
49 | int __mask = 1 << (__n & 0x1f); \ | ||
50 | if (__n < 32) \ | ||
51 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, __mask); \ | ||
52 | else \ | ||
53 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, __mask); \ | ||
54 | } while (0) | ||
55 | #define interrupt_mask_reset(n) do { \ | ||
56 | int __n = (n); \ | ||
57 | int __mask = 1 << (__n & 0x1f); \ | ||
58 | if (__n < 32) \ | ||
59 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, __mask); \ | ||
60 | else \ | ||
61 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, __mask); \ | ||
62 | } while (0) | ||
63 | #define interrupt_mask_check(n) ({ \ | ||
64 | int __n = (n); \ | ||
65 | (((__n < 32) ? \ | ||
66 | __insn_mfspr(SPR_INTERRUPT_MASK_1_0) : \ | ||
67 | __insn_mfspr(SPR_INTERRUPT_MASK_1_1)) \ | ||
68 | >> (__n & 0x1f)) & 1; \ | ||
69 | }) | ||
70 | #define interrupt_mask_set_mask(mask) do { \ | ||
71 | unsigned long long __m = (mask); \ | ||
72 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_0, (unsigned long)(__m)); \ | ||
73 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1_1, (unsigned long)(__m>>32)); \ | ||
74 | } while (0) | ||
75 | #define interrupt_mask_reset_mask(mask) do { \ | ||
76 | unsigned long long __m = (mask); \ | ||
77 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_0, (unsigned long)(__m)); \ | ||
78 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1_1, (unsigned long)(__m>>32)); \ | ||
79 | } while (0) | ||
80 | #else | ||
81 | #define interrupt_mask_set(n) \ | ||
82 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (1UL << (n))) | ||
83 | #define interrupt_mask_reset(n) \ | ||
84 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (1UL << (n))) | ||
85 | #define interrupt_mask_check(n) \ | ||
86 | ((__insn_mfspr(SPR_INTERRUPT_MASK_1) >> (n)) & 1) | ||
87 | #define interrupt_mask_set_mask(mask) \ | ||
88 | __insn_mtspr(SPR_INTERRUPT_MASK_SET_1, (mask)) | ||
89 | #define interrupt_mask_reset_mask(mask) \ | ||
90 | __insn_mtspr(SPR_INTERRUPT_MASK_RESET_1, (mask)) | ||
91 | #endif | ||
92 | |||
93 | /* | ||
94 | * The set of interrupts we want active if irqs are enabled. | ||
95 | * Note that in particular, the tile timer interrupt comes and goes | ||
96 | * from this set, since we have no other way to turn off the timer. | ||
97 | * Likewise, INTCTRL_1 is removed and re-added during device | ||
98 | * interrupts, as is the hardwall UDN_FIREWALL interrupt. | ||
99 | * We use a low bit (MEM_ERROR) as our sentinel value and make sure it | ||
100 | * is always claimed as an "active interrupt" so we can query that bit | ||
101 | * to know our current state. | ||
102 | */ | ||
103 | DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); | ||
104 | #define INITIAL_INTERRUPTS_ENABLED INT_MASK(INT_MEM_ERROR) | ||
105 | |||
106 | /* Disable interrupts. */ | ||
107 | #define raw_local_irq_disable() \ | ||
108 | interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS) | ||
109 | |||
110 | /* Disable all interrupts, including NMIs. */ | ||
111 | #define raw_local_irq_disable_all() \ | ||
112 | interrupt_mask_set_mask(-1UL) | ||
113 | |||
114 | /* Re-enable all maskable interrupts. */ | ||
115 | #define raw_local_irq_enable() \ | ||
116 | interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask)) | ||
117 | |||
118 | /* Disable or enable interrupts based on flag argument. */ | ||
119 | #define raw_local_irq_restore(disabled) do { \ | ||
120 | if (disabled) \ | ||
121 | raw_local_irq_disable(); \ | ||
122 | else \ | ||
123 | raw_local_irq_enable(); \ | ||
124 | } while (0) | ||
125 | |||
126 | /* Return true if "flags" argument means interrupts are disabled. */ | ||
127 | #define raw_irqs_disabled_flags(flags) ((flags) != 0) | ||
128 | |||
129 | /* Return true if interrupts are currently disabled. */ | ||
130 | #define raw_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR) | ||
131 | |||
132 | /* Save whether interrupts are currently disabled. */ | ||
133 | #define raw_local_save_flags(flags) ((flags) = raw_irqs_disabled()) | ||
134 | |||
135 | /* Save whether interrupts are currently disabled, then disable them. */ | ||
136 | #define raw_local_irq_save(flags) \ | ||
137 | do { raw_local_save_flags(flags); raw_local_irq_disable(); } while (0) | ||
138 | |||
139 | /* Prevent the given interrupt from being enabled next time we enable irqs. */ | ||
140 | #define raw_local_irq_mask(interrupt) \ | ||
141 | (__get_cpu_var(interrupts_enabled_mask) &= ~INT_MASK(interrupt)) | ||
142 | |||
143 | /* Prevent the given interrupt from being enabled immediately. */ | ||
144 | #define raw_local_irq_mask_now(interrupt) do { \ | ||
145 | raw_local_irq_mask(interrupt); \ | ||
146 | interrupt_mask_set(interrupt); \ | ||
147 | } while (0) | ||
148 | |||
149 | /* Allow the given interrupt to be enabled next time we enable irqs. */ | ||
150 | #define raw_local_irq_unmask(interrupt) \ | ||
151 | (__get_cpu_var(interrupts_enabled_mask) |= INT_MASK(interrupt)) | ||
152 | |||
153 | /* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ | ||
154 | #define raw_local_irq_unmask_now(interrupt) do { \ | ||
155 | raw_local_irq_unmask(interrupt); \ | ||
156 | if (!irqs_disabled()) \ | ||
157 | interrupt_mask_reset(interrupt); \ | ||
158 | } while (0) | ||
159 | |||
160 | #else /* __ASSEMBLY__ */ | ||
161 | |||
162 | /* We provide a somewhat more restricted set for assembly. */ | ||
163 | |||
164 | #ifdef __tilegx__ | ||
165 | |||
166 | #if INT_MEM_ERROR != 0 | ||
167 | # error Fix IRQS_DISABLED() macro | ||
168 | #endif | ||
169 | |||
170 | /* Return 0 or 1 to indicate whether interrupts are currently disabled. */ | ||
171 | #define IRQS_DISABLED(tmp) \ | ||
172 | mfspr tmp, INTERRUPT_MASK_1; \ | ||
173 | andi tmp, tmp, 1 | ||
174 | |||
175 | /* Load up a pointer to &interrupts_enabled_mask. */ | ||
176 | #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \ | ||
177 | moveli reg, hw2_last(interrupts_enabled_mask); \ | ||
178 | shl16insli reg, reg, hw1(interrupts_enabled_mask); \ | ||
179 | shl16insli reg, reg, hw0(interrupts_enabled_mask); \ | ||
180 | add reg, reg, tp | ||
181 | |||
182 | /* Disable interrupts. */ | ||
183 | #define IRQ_DISABLE(tmp0, tmp1) \ | ||
184 | moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \ | ||
185 | shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \ | ||
186 | shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \ | ||
187 | mtspr INTERRUPT_MASK_SET_1, tmp0 | ||
188 | |||
189 | /* Disable ALL maskable interrupts, even the "NMI" ones (used by NMI entry). */ | ||
190 | #define IRQ_DISABLE_ALL(tmp) \ | ||
191 | movei tmp, -1; \ | ||
192 | mtspr INTERRUPT_MASK_SET_1, tmp | ||
193 | |||
194 | /* Enable interrupts. */ | ||
195 | #define IRQ_ENABLE(tmp0, tmp1) \ | ||
196 | GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ | ||
197 | ld tmp0, tmp0; \ | ||
198 | mtspr INTERRUPT_MASK_RESET_1, tmp0 | ||
199 | |||
200 | #else /* !__tilegx__ */ | ||
201 | |||
202 | /* | ||
203 | * Return 0 or 1 to indicate whether interrupts are currently disabled. | ||
204 | * Note that it's important that we use a bit from the "low" mask word, | ||
205 | * since when we are enabling, that is the word we write first, so if we | ||
206 | * are interrupted after only writing half of the mask, the interrupt | ||
207 | * handler will correctly observe that we have interrupts enabled, and | ||
208 | * will enable interrupts itself on return from the interrupt handler | ||
209 | * (making the original code's write of the "high" mask word idempotent). | ||
210 | */ | ||
211 | #define IRQS_DISABLED(tmp) \ | ||
212 | mfspr tmp, INTERRUPT_MASK_1_0; \ | ||
213 | shri tmp, tmp, INT_MEM_ERROR; \ | ||
214 | andi tmp, tmp, 1 | ||
215 | |||
216 | /* Load up a pointer to &interrupts_enabled_mask. */ | ||
217 | #define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \ | ||
218 | moveli reg, lo16(interrupts_enabled_mask); \ | ||
219 | auli reg, reg, ha16(interrupts_enabled_mask);\ | ||
220 | add reg, reg, tp | ||
221 | |||
222 | /* Disable interrupts. */ | ||
223 | #define IRQ_DISABLE(tmp0, tmp1) \ | ||
224 | { \ | ||
225 | movei tmp0, -1; \ | ||
226 | moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS) \ | ||
227 | }; \ | ||
228 | { \ | ||
229 | mtspr INTERRUPT_MASK_SET_1_0, tmp0; \ | ||
230 | auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS) \ | ||
231 | }; \ | ||
232 | mtspr INTERRUPT_MASK_SET_1_1, tmp1 | ||
233 | |||
234 | /* Disable ALL maskable interrupts, even the "NMI" ones (used by NMI entry). */ | ||
235 | #define IRQ_DISABLE_ALL(tmp) \ | ||
236 | movei tmp, -1; \ | ||
237 | mtspr INTERRUPT_MASK_SET_1_0, tmp; \ | ||
238 | mtspr INTERRUPT_MASK_SET_1_1, tmp | ||
239 | |||
240 | /* Enable interrupts. */ | ||
241 | #define IRQ_ENABLE(tmp0, tmp1) \ | ||
242 | GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ | ||
243 | { \ | ||
244 | lw tmp0, tmp0; \ | ||
245 | addi tmp1, tmp0, 4 \ | ||
246 | }; \ | ||
247 | lw tmp1, tmp1; \ | ||
248 | mtspr INTERRUPT_MASK_RESET_1_0, tmp0; \ | ||
249 | mtspr INTERRUPT_MASK_RESET_1_1, tmp1 | ||
250 | #endif | ||
251 | |||
252 | /* | ||
253 | * Do the CPU's IRQ-state tracing from assembly code. We call a | ||
254 | * C function, but almost everywhere we do, we don't mind clobbering | ||
255 | * all the caller-saved registers. | ||
256 | */ | ||
257 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
258 | # define TRACE_IRQS_ON jal trace_hardirqs_on | ||
259 | # define TRACE_IRQS_OFF jal trace_hardirqs_off | ||
260 | #else | ||
261 | # define TRACE_IRQS_ON | ||
262 | # define TRACE_IRQS_OFF | ||
263 | #endif | ||
264 | |||
265 | #endif /* __ASSEMBLY__ */ | ||
266 | |||
267 | #endif /* _ASM_TILE_IRQFLAGS_H */ | ||
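A minimal sketch of the save/disable/restore idiom these macros implement; here "flags" is simply the disabled/enabled boolean described by raw_irqs_disabled_flags(), and the counter stands in for any per-cpu state that must be touched with interrupts masked.

#include <asm/irqflags.h>

static void guarded_increment(unsigned long *counter)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* record prior state, then mask */
	(*counter)++;			/* no maskable interrupt can intervene */
	raw_local_irq_restore(flags);	/* unmask only if enabled beforehand */
}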
diff --git a/arch/tile/include/asm/kdebug.h b/arch/tile/include/asm/kdebug.h new file mode 100644 index 000000000000..6ece1b037665 --- /dev/null +++ b/arch/tile/include/asm/kdebug.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/kdebug.h> | |||
diff --git a/arch/tile/include/asm/kexec.h b/arch/tile/include/asm/kexec.h new file mode 100644 index 000000000000..c11a6cc73bb8 --- /dev/null +++ b/arch/tile/include/asm/kexec.h | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * based on kexec.h from other architectures in linux-2.6.18 | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_KEXEC_H | ||
18 | #define _ASM_TILE_KEXEC_H | ||
19 | |||
20 | #include <asm/page.h> | ||
21 | |||
22 | /* Maximum physical address we can use pages from. */ | ||
23 | #define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE | ||
24 | /* Maximum address we can reach in physical address mode. */ | ||
25 | #define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE | ||
26 | /* Maximum address we can use for the control code buffer. */ | ||
27 | #define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE | ||
28 | |||
29 | #define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE | ||
30 | |||
31 | /* | ||
32 | * We don't bother to provide a unique identifier, since we can only | ||
33 | * reboot with a single type of kernel image anyway. | ||
34 | */ | ||
35 | #define KEXEC_ARCH KEXEC_ARCH_DEFAULT | ||
36 | |||
37 | /* Use the tile override for the page allocator. */ | ||
38 | struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order); | ||
39 | #define kimage_alloc_pages_arch kimage_alloc_pages_arch | ||
40 | |||
41 | #define MAX_NOTE_BYTES 1024 | ||
42 | |||
43 | /* Defined in arch/tile/kernel/relocate_kernel.S */ | ||
44 | extern const unsigned char relocate_new_kernel[]; | ||
45 | extern const unsigned long relocate_new_kernel_size; | ||
46 | extern void relocate_new_kernel_end(void); | ||
47 | |||
48 | /* Provide a dummy definition to avoid build failures. */ | ||
49 | static inline void crash_setup_regs(struct pt_regs *n, struct pt_regs *o) | ||
50 | { | ||
51 | } | ||
52 | |||
53 | #endif /* _ASM_TILE_KEXEC_H */ | ||
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h new file mode 100644 index 000000000000..1480106d1c05 --- /dev/null +++ b/arch/tile/include/asm/kmap_types.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_KMAP_TYPES_H | ||
16 | #define _ASM_TILE_KMAP_TYPES_H | ||
17 | |||
18 | /* | ||
19 | * In TILE Linux each set of four of these uses another 16MB chunk of | ||
20 | * address space, given 64 tiles and 64KB pages, so we only enable | ||
21 | * ones that are required by the kernel configuration. | ||
22 | */ | ||
23 | enum km_type { | ||
24 | KM_BOUNCE_READ, | ||
25 | KM_SKB_SUNRPC_DATA, | ||
26 | KM_SKB_DATA_SOFTIRQ, | ||
27 | KM_USER0, | ||
28 | KM_USER1, | ||
29 | KM_BIO_SRC_IRQ, | ||
30 | KM_IRQ0, | ||
31 | KM_IRQ1, | ||
32 | KM_SOFTIRQ0, | ||
33 | KM_SOFTIRQ1, | ||
34 | KM_MEMCPY0, | ||
35 | KM_MEMCPY1, | ||
36 | #if defined(CONFIG_HIGHPTE) | ||
37 | KM_PTE0, | ||
38 | KM_PTE1, | ||
39 | #endif | ||
40 | KM_TYPE_NR | ||
41 | }; | ||
42 | |||
43 | #endif /* _ASM_TILE_KMAP_TYPES_H */ | ||
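A minimal sketch of how these slots are consumed, assuming the two-argument kmap_atomic() interface of this kernel generation and a caller that already holds a (possibly highmem) struct page; in practice helpers such as clear_highpage() wrap this same pattern.

#include <linux/highmem.h>
#include <linux/string.h>

/* Zero a page through a temporary KM_USER0 atomic mapping. */
static void zero_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}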
diff --git a/arch/tile/include/asm/linkage.h b/arch/tile/include/asm/linkage.h new file mode 100644 index 000000000000..e121c39751a7 --- /dev/null +++ b/arch/tile/include/asm/linkage.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_LINKAGE_H | ||
16 | #define _ASM_TILE_LINKAGE_H | ||
17 | |||
18 | #include <feedback.h> | ||
19 | |||
20 | #define __ALIGN .align 8 | ||
21 | |||
22 | /* | ||
23 | * The STD_ENTRY and STD_ENDPROC macros put the function in a | ||
24 | * self-named .text.foo section, and if linker feedback collection | ||
25 | * is enabled, add a suitable call to the feedback collection code. | ||
26 | * STD_ENTRY_SECTION lets you specify a non-standard section name. | ||
27 | */ | ||
28 | |||
29 | #define STD_ENTRY(name) \ | ||
30 | .pushsection .text.##name, "ax"; \ | ||
31 | ENTRY(name); \ | ||
32 | FEEDBACK_ENTER(name) | ||
33 | |||
34 | #define STD_ENTRY_SECTION(name, section) \ | ||
35 | .pushsection section, "ax"; \ | ||
36 | ENTRY(name); \ | ||
37 | FEEDBACK_ENTER_EXPLICIT(name, section, .Lend_##name - name) | ||
38 | |||
39 | #define STD_ENDPROC(name) \ | ||
40 | ENDPROC(name); \ | ||
41 | .Lend_##name:; \ | ||
42 | .popsection | ||
43 | |||
44 | /* Create a file-static function entry set up for feedback gathering. */ | ||
45 | #define STD_ENTRY_LOCAL(name) \ | ||
46 | .pushsection .text.##name, "ax"; \ | ||
47 | ALIGN; \ | ||
48 | name:; \ | ||
49 | FEEDBACK_ENTER(name) | ||
50 | |||
51 | #endif /* _ASM_TILE_LINKAGE_H */ | ||
diff --git a/arch/tile/include/asm/local.h b/arch/tile/include/asm/local.h new file mode 100644 index 000000000000..c11c530f74d0 --- /dev/null +++ b/arch/tile/include/asm/local.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/local.h> | |||
diff --git a/arch/tile/include/asm/memprof.h b/arch/tile/include/asm/memprof.h new file mode 100644 index 000000000000..359949be28c1 --- /dev/null +++ b/arch/tile/include/asm/memprof.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * The hypervisor's memory controller profiling infrastructure allows | ||
15 | * the programmer to find out what fraction of the available memory | ||
16 | * bandwidth is being consumed at each memory controller. The | ||
17 | * profiler provides start, stop, and clear operations to allow | ||
18 | * profiling over a specific time window, as well as an interface for | ||
19 | * reading the most recent profile values. | ||
20 | * | ||
21 | * This header declares IOCTL codes necessary to control memprof. | ||
22 | */ | ||
23 | #ifndef _ASM_TILE_MEMPROF_H | ||
24 | #define _ASM_TILE_MEMPROF_H | ||
25 | |||
26 | #include <linux/ioctl.h> | ||
27 | |||
28 | #define MEMPROF_IOCTL_TYPE 0xB4 | ||
29 | #define MEMPROF_IOCTL_START _IO(MEMPROF_IOCTL_TYPE, 0) | ||
30 | #define MEMPROF_IOCTL_STOP _IO(MEMPROF_IOCTL_TYPE, 1) | ||
31 | #define MEMPROF_IOCTL_CLEAR _IO(MEMPROF_IOCTL_TYPE, 2) | ||
32 | |||
33 | #endif /* _ASM_TILE_MEMPROF_H */ | ||
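A minimal user-space sketch of the start/stop/clear flow these ioctls expose; the /dev/memprof node name is an assumption, since the driver that would create it is not part of this patch.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/memprof.h>

/* Clear the counters, run a workload, then stop so results can be read. */
int profile_workload(void (*workload)(void))
{
	int fd = open("/dev/memprof", O_RDWR);

	if (fd < 0)
		return -1;
	ioctl(fd, MEMPROF_IOCTL_CLEAR);
	ioctl(fd, MEMPROF_IOCTL_START);
	workload();
	ioctl(fd, MEMPROF_IOCTL_STOP);
	/* ... read the per-controller bandwidth figures via the driver ... */
	close(fd);
	return 0;
}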
diff --git a/arch/tile/include/asm/mman.h b/arch/tile/include/asm/mman.h new file mode 100644 index 000000000000..4c6811e3e8dc --- /dev/null +++ b/arch/tile/include/asm/mman.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMAN_H | ||
16 | #define _ASM_TILE_MMAN_H | ||
17 | |||
18 | #include <asm-generic/mman-common.h> | ||
19 | #include <arch/chip.h> | ||
20 | |||
21 | /* Standard Linux flags */ | ||
22 | |||
23 | #define MAP_POPULATE 0x0040 /* populate (prefault) pagetables */ | ||
24 | #define MAP_NONBLOCK 0x0080 /* do not block on IO */ | ||
25 | #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ | ||
26 | #define MAP_LOCKED 0x0200 /* pages are locked */ | ||
27 | #define MAP_NORESERVE 0x0400 /* don't check for reservations */ | ||
28 | #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ | ||
29 | #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ | ||
30 | #define MAP_HUGETLB 0x4000 /* create a huge page mapping */ | ||
31 | |||
32 | |||
33 | /* | ||
34 | * Flags for mlockall | ||
35 | */ | ||
36 | #define MCL_CURRENT 1 /* lock all current mappings */ | ||
37 | #define MCL_FUTURE 2 /* lock all future mappings */ | ||
38 | |||
39 | |||
40 | #endif /* _ASM_TILE_MMAN_H */ | ||
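A minimal user-space sketch of the MAP_HUGETLB flag defined above; the 16MB length is an assumption chosen to match a typical tile huge page size, and the kernel rejects lengths that are not huge-page aligned.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16UL << 20;	/* one (assumed) 16MB huge page */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	munmap(p, len);
	return 0;
}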
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h new file mode 100644 index 000000000000..92f94c77b6e4 --- /dev/null +++ b/arch/tile/include/asm/mmu.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMU_H | ||
16 | #define _ASM_TILE_MMU_H | ||
17 | |||
18 | /* Capture any arch- and mm-specific information. */ | ||
19 | struct mm_context { | ||
20 | /* | ||
21 | * Written under the mmap_sem semaphore; read atomically without | ||
22 | * the semaphore, but only ever set conservatively. | ||
23 | */ | ||
24 | unsigned int priority_cached; | ||
25 | }; | ||
26 | |||
27 | typedef struct mm_context mm_context_t; | ||
28 | |||
29 | void leave_mm(int cpu); | ||
30 | |||
31 | #endif /* _ASM_TILE_MMU_H */ | ||
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h new file mode 100644 index 000000000000..9bc0d0725c28 --- /dev/null +++ b/arch/tile/include/asm/mmu_context.h | |||
@@ -0,0 +1,131 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMU_CONTEXT_H | ||
16 | #define _ASM_TILE_MMU_CONTEXT_H | ||
17 | |||
18 | #include <linux/smp.h> | ||
19 | #include <asm/setup.h> | ||
20 | #include <asm/page.h> | ||
21 | #include <asm/pgalloc.h> | ||
22 | #include <asm/pgtable.h> | ||
23 | #include <asm/tlbflush.h> | ||
24 | #include <asm/homecache.h> | ||
25 | #include <asm-generic/mm_hooks.h> | ||
26 | |||
27 | static inline int | ||
28 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | ||
29 | { | ||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | /* Note that arch/tile/kernel/head.S also calls hv_install_context() */ | ||
34 | static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot) | ||
35 | { | ||
36 | /* FIXME: DIRECTIO should not always be set. */ | ||
37 | int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO); | ||
38 | if (rc < 0) | ||
39 | panic("hv_install_context failed: %d", rc); | ||
40 | } | ||
41 | |||
42 | static inline void install_page_table(pgd_t *pgdir, int asid) | ||
43 | { | ||
44 | pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir); | ||
45 | __install_page_table(pgdir, asid, *ptep); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * "Lazy" TLB mode is entered when we are switching to a kernel task, | ||
50 | * which borrows the mm of the previous task. The goal of this | ||
51 | * optimization is to avoid having to install a new page table. On | ||
52 | * early x86 machines (where the concept originated) you couldn't do | ||
53 | * anything short of a full page table install for invalidation, so | ||
54 | * handling a remote TLB invalidate required doing a page table | ||
55 | * re-install. Someone clearly decided that it was silly to keep | ||
56 | * doing this while in "lazy" TLB mode, so the optimization involves | ||
57 | * installing the swapper page table instead, the first time one | ||
58 | * occurs, and clearing the cpu out of cpu_vm_mask, so the cpu running | ||
59 | * the kernel task doesn't need to take any more interrupts. At that | ||
60 | * point it's then necessary to explicitly reinstall it when context | ||
61 | * switching back to the original mm. | ||
62 | * | ||
63 | * On Tile, we have to do a page-table install whenever DMA is enabled, | ||
64 | * so in that case lazy mode doesn't help anyway. And more generally, | ||
65 | * we have efficient per-page TLB shootdown, and don't expect to spend | ||
66 | * that much time in kernel tasks in general, so just leaving the | ||
67 | * kernel task borrowing the old page table, but handling TLB | ||
68 | * shootdowns, is a reasonable thing to do. And importantly, this | ||
69 | * lets us use the hypervisor's internal APIs for TLB shootdown, which | ||
70 | * means we don't have to worry about having TLB shootdowns blocked | ||
71 | * when Linux is disabling interrupts; see the page migration code for | ||
72 | * an example of where it's important for TLB shootdowns to complete | ||
73 | * even when interrupts are disabled at the Linux level. | ||
74 | */ | ||
75 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t) | ||
76 | { | ||
77 | #if CHIP_HAS_TILE_DMA() | ||
78 | /* | ||
79 | * We have to do an "identity" page table switch in order to | ||
80 | * clear any pending DMA interrupts. | ||
81 | */ | ||
82 | if (current->thread.tile_dma_state.enabled) | ||
83 | install_page_table(mm->pgd, __get_cpu_var(current_asid)); | ||
84 | #endif | ||
85 | } | ||
86 | |||
87 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
88 | struct task_struct *tsk) | ||
89 | { | ||
90 | if (likely(prev != next)) { | ||
91 | |||
92 | int cpu = smp_processor_id(); | ||
93 | |||
94 | /* Pick new ASID. */ | ||
95 | int asid = __get_cpu_var(current_asid) + 1; | ||
96 | if (asid > max_asid) { | ||
97 | asid = min_asid; | ||
98 | local_flush_tlb(); | ||
99 | } | ||
100 | __get_cpu_var(current_asid) = asid; | ||
101 | |||
102 | /* Clear cpu from the old mm, and set it in the new one. */ | ||
103 | cpumask_clear_cpu(cpu, &prev->cpu_vm_mask); | ||
104 | cpumask_set_cpu(cpu, &next->cpu_vm_mask); | ||
105 | |||
106 | /* Re-load page tables */ | ||
107 | install_page_table(next->pgd, asid); | ||
108 | |||
109 | /* See how we should set the red/black cache info */ | ||
110 | check_mm_caching(prev, next); | ||
111 | |||
112 | /* | ||
113 | * Since we're changing to a new mm, we have to flush | ||
114 | * the icache in case some physical page now being mapped | ||
115 | * has subsequently been repurposed and has new code. | ||
116 | */ | ||
117 | __flush_icache(); | ||
118 | |||
119 | } | ||
120 | } | ||
121 | |||
122 | static inline void activate_mm(struct mm_struct *prev_mm, | ||
123 | struct mm_struct *next_mm) | ||
124 | { | ||
125 | switch_mm(prev_mm, next_mm, NULL); | ||
126 | } | ||
127 | |||
128 | #define destroy_context(mm) do { } while (0) | ||
129 | #define deactivate_mm(tsk, mm) do { } while (0) | ||
130 | |||
131 | #endif /* _ASM_TILE_MMU_CONTEXT_H */ | ||
diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h new file mode 100644 index 000000000000..c6344c4f32ac --- /dev/null +++ b/arch/tile/include/asm/mmzone.h | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_MMZONE_H | ||
16 | #define _ASM_TILE_MMZONE_H | ||
17 | |||
18 | extern struct pglist_data node_data[]; | ||
19 | #define NODE_DATA(nid) (&node_data[nid]) | ||
20 | |||
21 | extern void get_memcfg_numa(void); | ||
22 | |||
23 | #ifdef CONFIG_DISCONTIGMEM | ||
24 | |||
25 | #include <asm/page.h> | ||
26 | |||
27 | /* | ||
28 | * Memory ranges are generally doled out by the hypervisor in | ||
29 | * fixed-size, power-of-two increments. That would make computing the node | ||
30 | * very easy. We could just take a couple of high bits of the PA, which | ||
31 | * denote the memory shim, and we'd be done. However, when we're doing | ||
32 | * memory striping, this may not be true; PAs with different high bit | ||
33 | * values might be in the same node. Thus, we keep a lookup table to | ||
34 | * translate the high bits of the PFN to the node number. | ||
35 | */ | ||
36 | extern int highbits_to_node[]; | ||
37 | |||
38 | static inline int pfn_to_nid(unsigned long pfn) | ||
39 | { | ||
40 | return highbits_to_node[__pfn_to_highbits(pfn)]; | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * The following are macros that each NUMA implementation must define. | ||
45 | */ | ||
46 | |||
47 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
48 | #define node_end_pfn(nid) \ | ||
49 | ({ \ | ||
50 | pg_data_t *__pgdat = NODE_DATA(nid); \ | ||
51 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ | ||
52 | }) | ||
53 | |||
54 | #define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr) | ||
55 | |||
56 | static inline int pfn_valid(int pfn) | ||
57 | { | ||
58 | int nid = pfn_to_nid(pfn); | ||
59 | |||
60 | if (nid >= 0) | ||
61 | return (pfn < node_end_pfn(nid)); | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | /* Information on the NUMA nodes that we compute early */ | ||
66 | extern unsigned long node_start_pfn[]; | ||
67 | extern unsigned long node_end_pfn[]; | ||
68 | extern unsigned long node_memmap_pfn[]; | ||
69 | extern unsigned long node_percpu_pfn[]; | ||
70 | extern unsigned long node_free_pfn[]; | ||
71 | #ifdef CONFIG_HIGHMEM | ||
72 | extern unsigned long node_lowmem_end_pfn[]; | ||
73 | #endif | ||
74 | #ifdef CONFIG_PCI | ||
75 | extern unsigned long pci_reserve_start_pfn; | ||
76 | extern unsigned long pci_reserve_end_pfn; | ||
77 | #endif | ||
78 | |||
79 | #endif /* CONFIG_DISCONTIGMEM */ | ||
80 | |||
81 | #endif /* _ASM_TILE_MMZONE_H */ | ||
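A minimal boot-time sketch exercising the pfn_to_nid() lookup above against the per-node pfn arrays declared in this header; it assumes CONFIG_DISCONTIGMEM, and the check itself is hypothetical (for_each_online_node() and WARN_ON() come from the generic kernel headers).

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/nodemask.h>
#include <asm/mmzone.h>

/* Each node's starting pfn should map back to that node via the table. */
static void __init check_highbits_table(void)
{
	int nid;

	for_each_online_node(nid)
		WARN_ON(pfn_to_nid(node_start_pfn[nid]) != nid);
}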
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h new file mode 100644 index 000000000000..1e4b79fe8584 --- /dev/null +++ b/arch/tile/include/asm/module.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/module.h> | |||
diff --git a/arch/tile/include/asm/msgbuf.h b/arch/tile/include/asm/msgbuf.h new file mode 100644 index 000000000000..809134c644a6 --- /dev/null +++ b/arch/tile/include/asm/msgbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/msgbuf.h> | |||
diff --git a/arch/tile/include/asm/mutex.h b/arch/tile/include/asm/mutex.h new file mode 100644 index 000000000000..ff6101aa2c71 --- /dev/null +++ b/arch/tile/include/asm/mutex.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/mutex-dec.h> | |||
diff --git a/arch/tile/include/asm/opcode-tile.h b/arch/tile/include/asm/opcode-tile.h new file mode 100644 index 000000000000..ba38959137d7 --- /dev/null +++ b/arch/tile/include/asm/opcode-tile.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_OPCODE_TILE_H | ||
16 | #define _ASM_TILE_OPCODE_TILE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | #if CHIP_WORD_SIZE() == 64 | ||
21 | #include <asm/opcode-tile_64.h> | ||
22 | #else | ||
23 | #include <asm/opcode-tile_32.h> | ||
24 | #endif | ||
25 | |||
26 | /* These definitions are not correct for TILE64, so just avoid them. */ | ||
27 | #undef TILE_ELF_MACHINE_CODE | ||
28 | #undef TILE_ELF_NAME | ||
29 | |||
30 | #endif /* _ASM_TILE_OPCODE_TILE_H */ | ||
diff --git a/arch/tile/include/asm/opcode-tile_32.h b/arch/tile/include/asm/opcode-tile_32.h new file mode 100644 index 000000000000..90f8dd372531 --- /dev/null +++ b/arch/tile/include/asm/opcode-tile_32.h | |||
@@ -0,0 +1,1597 @@ | |||
1 | /* tile.h -- Header file for TILE opcode table | ||
2 | Copyright (C) 2005 Free Software Foundation, Inc. | ||
3 | Contributed by Tilera Corp. */ | ||
4 | |||
5 | #ifndef opcode_tile_h | ||
6 | #define opcode_tile_h | ||
7 | |||
8 | typedef unsigned long long tile_bundle_bits; | ||
9 | |||
10 | |||
11 | enum | ||
12 | { | ||
13 | TILE_MAX_OPERANDS = 5 /* mm */ | ||
14 | }; | ||
15 | |||
16 | typedef enum | ||
17 | { | ||
18 | TILE_OPC_BPT, | ||
19 | TILE_OPC_INFO, | ||
20 | TILE_OPC_INFOL, | ||
21 | TILE_OPC_J, | ||
22 | TILE_OPC_JAL, | ||
23 | TILE_OPC_MOVE, | ||
24 | TILE_OPC_MOVE_SN, | ||
25 | TILE_OPC_MOVEI, | ||
26 | TILE_OPC_MOVEI_SN, | ||
27 | TILE_OPC_MOVELI, | ||
28 | TILE_OPC_MOVELI_SN, | ||
29 | TILE_OPC_MOVELIS, | ||
30 | TILE_OPC_PREFETCH, | ||
31 | TILE_OPC_ADD, | ||
32 | TILE_OPC_ADD_SN, | ||
33 | TILE_OPC_ADDB, | ||
34 | TILE_OPC_ADDB_SN, | ||
35 | TILE_OPC_ADDBS_U, | ||
36 | TILE_OPC_ADDBS_U_SN, | ||
37 | TILE_OPC_ADDH, | ||
38 | TILE_OPC_ADDH_SN, | ||
39 | TILE_OPC_ADDHS, | ||
40 | TILE_OPC_ADDHS_SN, | ||
41 | TILE_OPC_ADDI, | ||
42 | TILE_OPC_ADDI_SN, | ||
43 | TILE_OPC_ADDIB, | ||
44 | TILE_OPC_ADDIB_SN, | ||
45 | TILE_OPC_ADDIH, | ||
46 | TILE_OPC_ADDIH_SN, | ||
47 | TILE_OPC_ADDLI, | ||
48 | TILE_OPC_ADDLI_SN, | ||
49 | TILE_OPC_ADDLIS, | ||
50 | TILE_OPC_ADDS, | ||
51 | TILE_OPC_ADDS_SN, | ||
52 | TILE_OPC_ADIFFB_U, | ||
53 | TILE_OPC_ADIFFB_U_SN, | ||
54 | TILE_OPC_ADIFFH, | ||
55 | TILE_OPC_ADIFFH_SN, | ||
56 | TILE_OPC_AND, | ||
57 | TILE_OPC_AND_SN, | ||
58 | TILE_OPC_ANDI, | ||
59 | TILE_OPC_ANDI_SN, | ||
60 | TILE_OPC_AULI, | ||
61 | TILE_OPC_AVGB_U, | ||
62 | TILE_OPC_AVGB_U_SN, | ||
63 | TILE_OPC_AVGH, | ||
64 | TILE_OPC_AVGH_SN, | ||
65 | TILE_OPC_BBNS, | ||
66 | TILE_OPC_BBNS_SN, | ||
67 | TILE_OPC_BBNST, | ||
68 | TILE_OPC_BBNST_SN, | ||
69 | TILE_OPC_BBS, | ||
70 | TILE_OPC_BBS_SN, | ||
71 | TILE_OPC_BBST, | ||
72 | TILE_OPC_BBST_SN, | ||
73 | TILE_OPC_BGEZ, | ||
74 | TILE_OPC_BGEZ_SN, | ||
75 | TILE_OPC_BGEZT, | ||
76 | TILE_OPC_BGEZT_SN, | ||
77 | TILE_OPC_BGZ, | ||
78 | TILE_OPC_BGZ_SN, | ||
79 | TILE_OPC_BGZT, | ||
80 | TILE_OPC_BGZT_SN, | ||
81 | TILE_OPC_BITX, | ||
82 | TILE_OPC_BITX_SN, | ||
83 | TILE_OPC_BLEZ, | ||
84 | TILE_OPC_BLEZ_SN, | ||
85 | TILE_OPC_BLEZT, | ||
86 | TILE_OPC_BLEZT_SN, | ||
87 | TILE_OPC_BLZ, | ||
88 | TILE_OPC_BLZ_SN, | ||
89 | TILE_OPC_BLZT, | ||
90 | TILE_OPC_BLZT_SN, | ||
91 | TILE_OPC_BNZ, | ||
92 | TILE_OPC_BNZ_SN, | ||
93 | TILE_OPC_BNZT, | ||
94 | TILE_OPC_BNZT_SN, | ||
95 | TILE_OPC_BYTEX, | ||
96 | TILE_OPC_BYTEX_SN, | ||
97 | TILE_OPC_BZ, | ||
98 | TILE_OPC_BZ_SN, | ||
99 | TILE_OPC_BZT, | ||
100 | TILE_OPC_BZT_SN, | ||
101 | TILE_OPC_CLZ, | ||
102 | TILE_OPC_CLZ_SN, | ||
103 | TILE_OPC_CRC32_32, | ||
104 | TILE_OPC_CRC32_32_SN, | ||
105 | TILE_OPC_CRC32_8, | ||
106 | TILE_OPC_CRC32_8_SN, | ||
107 | TILE_OPC_CTZ, | ||
108 | TILE_OPC_CTZ_SN, | ||
109 | TILE_OPC_DRAIN, | ||
110 | TILE_OPC_DTLBPR, | ||
111 | TILE_OPC_DWORD_ALIGN, | ||
112 | TILE_OPC_DWORD_ALIGN_SN, | ||
113 | TILE_OPC_FINV, | ||
114 | TILE_OPC_FLUSH, | ||
115 | TILE_OPC_FNOP, | ||
116 | TILE_OPC_ICOH, | ||
117 | TILE_OPC_ILL, | ||
118 | TILE_OPC_INTHB, | ||
119 | TILE_OPC_INTHB_SN, | ||
120 | TILE_OPC_INTHH, | ||
121 | TILE_OPC_INTHH_SN, | ||
122 | TILE_OPC_INTLB, | ||
123 | TILE_OPC_INTLB_SN, | ||
124 | TILE_OPC_INTLH, | ||
125 | TILE_OPC_INTLH_SN, | ||
126 | TILE_OPC_INV, | ||
127 | TILE_OPC_IRET, | ||
128 | TILE_OPC_JALB, | ||
129 | TILE_OPC_JALF, | ||
130 | TILE_OPC_JALR, | ||
131 | TILE_OPC_JALRP, | ||
132 | TILE_OPC_JB, | ||
133 | TILE_OPC_JF, | ||
134 | TILE_OPC_JR, | ||
135 | TILE_OPC_JRP, | ||
136 | TILE_OPC_LB, | ||
137 | TILE_OPC_LB_SN, | ||
138 | TILE_OPC_LB_U, | ||
139 | TILE_OPC_LB_U_SN, | ||
140 | TILE_OPC_LBADD, | ||
141 | TILE_OPC_LBADD_SN, | ||
142 | TILE_OPC_LBADD_U, | ||
143 | TILE_OPC_LBADD_U_SN, | ||
144 | TILE_OPC_LH, | ||
145 | TILE_OPC_LH_SN, | ||
146 | TILE_OPC_LH_U, | ||
147 | TILE_OPC_LH_U_SN, | ||
148 | TILE_OPC_LHADD, | ||
149 | TILE_OPC_LHADD_SN, | ||
150 | TILE_OPC_LHADD_U, | ||
151 | TILE_OPC_LHADD_U_SN, | ||
152 | TILE_OPC_LNK, | ||
153 | TILE_OPC_LNK_SN, | ||
154 | TILE_OPC_LW, | ||
155 | TILE_OPC_LW_SN, | ||
156 | TILE_OPC_LW_NA, | ||
157 | TILE_OPC_LW_NA_SN, | ||
158 | TILE_OPC_LWADD, | ||
159 | TILE_OPC_LWADD_SN, | ||
160 | TILE_OPC_LWADD_NA, | ||
161 | TILE_OPC_LWADD_NA_SN, | ||
162 | TILE_OPC_MAXB_U, | ||
163 | TILE_OPC_MAXB_U_SN, | ||
164 | TILE_OPC_MAXH, | ||
165 | TILE_OPC_MAXH_SN, | ||
166 | TILE_OPC_MAXIB_U, | ||
167 | TILE_OPC_MAXIB_U_SN, | ||
168 | TILE_OPC_MAXIH, | ||
169 | TILE_OPC_MAXIH_SN, | ||
170 | TILE_OPC_MF, | ||
171 | TILE_OPC_MFSPR, | ||
172 | TILE_OPC_MINB_U, | ||
173 | TILE_OPC_MINB_U_SN, | ||
174 | TILE_OPC_MINH, | ||
175 | TILE_OPC_MINH_SN, | ||
176 | TILE_OPC_MINIB_U, | ||
177 | TILE_OPC_MINIB_U_SN, | ||
178 | TILE_OPC_MINIH, | ||
179 | TILE_OPC_MINIH_SN, | ||
180 | TILE_OPC_MM, | ||
181 | TILE_OPC_MNZ, | ||
182 | TILE_OPC_MNZ_SN, | ||
183 | TILE_OPC_MNZB, | ||
184 | TILE_OPC_MNZB_SN, | ||
185 | TILE_OPC_MNZH, | ||
186 | TILE_OPC_MNZH_SN, | ||
187 | TILE_OPC_MTSPR, | ||
188 | TILE_OPC_MULHH_SS, | ||
189 | TILE_OPC_MULHH_SS_SN, | ||
190 | TILE_OPC_MULHH_SU, | ||
191 | TILE_OPC_MULHH_SU_SN, | ||
192 | TILE_OPC_MULHH_UU, | ||
193 | TILE_OPC_MULHH_UU_SN, | ||
194 | TILE_OPC_MULHHA_SS, | ||
195 | TILE_OPC_MULHHA_SS_SN, | ||
196 | TILE_OPC_MULHHA_SU, | ||
197 | TILE_OPC_MULHHA_SU_SN, | ||
198 | TILE_OPC_MULHHA_UU, | ||
199 | TILE_OPC_MULHHA_UU_SN, | ||
200 | TILE_OPC_MULHHSA_UU, | ||
201 | TILE_OPC_MULHHSA_UU_SN, | ||
202 | TILE_OPC_MULHL_SS, | ||
203 | TILE_OPC_MULHL_SS_SN, | ||
204 | TILE_OPC_MULHL_SU, | ||
205 | TILE_OPC_MULHL_SU_SN, | ||
206 | TILE_OPC_MULHL_US, | ||
207 | TILE_OPC_MULHL_US_SN, | ||
208 | TILE_OPC_MULHL_UU, | ||
209 | TILE_OPC_MULHL_UU_SN, | ||
210 | TILE_OPC_MULHLA_SS, | ||
211 | TILE_OPC_MULHLA_SS_SN, | ||
212 | TILE_OPC_MULHLA_SU, | ||
213 | TILE_OPC_MULHLA_SU_SN, | ||
214 | TILE_OPC_MULHLA_US, | ||
215 | TILE_OPC_MULHLA_US_SN, | ||
216 | TILE_OPC_MULHLA_UU, | ||
217 | TILE_OPC_MULHLA_UU_SN, | ||
218 | TILE_OPC_MULHLSA_UU, | ||
219 | TILE_OPC_MULHLSA_UU_SN, | ||
220 | TILE_OPC_MULLL_SS, | ||
221 | TILE_OPC_MULLL_SS_SN, | ||
222 | TILE_OPC_MULLL_SU, | ||
223 | TILE_OPC_MULLL_SU_SN, | ||
224 | TILE_OPC_MULLL_UU, | ||
225 | TILE_OPC_MULLL_UU_SN, | ||
226 | TILE_OPC_MULLLA_SS, | ||
227 | TILE_OPC_MULLLA_SS_SN, | ||
228 | TILE_OPC_MULLLA_SU, | ||
229 | TILE_OPC_MULLLA_SU_SN, | ||
230 | TILE_OPC_MULLLA_UU, | ||
231 | TILE_OPC_MULLLA_UU_SN, | ||
232 | TILE_OPC_MULLLSA_UU, | ||
233 | TILE_OPC_MULLLSA_UU_SN, | ||
234 | TILE_OPC_MVNZ, | ||
235 | TILE_OPC_MVNZ_SN, | ||
236 | TILE_OPC_MVZ, | ||
237 | TILE_OPC_MVZ_SN, | ||
238 | TILE_OPC_MZ, | ||
239 | TILE_OPC_MZ_SN, | ||
240 | TILE_OPC_MZB, | ||
241 | TILE_OPC_MZB_SN, | ||
242 | TILE_OPC_MZH, | ||
243 | TILE_OPC_MZH_SN, | ||
244 | TILE_OPC_NAP, | ||
245 | TILE_OPC_NOP, | ||
246 | TILE_OPC_NOR, | ||
247 | TILE_OPC_NOR_SN, | ||
248 | TILE_OPC_OR, | ||
249 | TILE_OPC_OR_SN, | ||
250 | TILE_OPC_ORI, | ||
251 | TILE_OPC_ORI_SN, | ||
252 | TILE_OPC_PACKBS_U, | ||
253 | TILE_OPC_PACKBS_U_SN, | ||
254 | TILE_OPC_PACKHB, | ||
255 | TILE_OPC_PACKHB_SN, | ||
256 | TILE_OPC_PACKHS, | ||
257 | TILE_OPC_PACKHS_SN, | ||
258 | TILE_OPC_PACKLB, | ||
259 | TILE_OPC_PACKLB_SN, | ||
260 | TILE_OPC_PCNT, | ||
261 | TILE_OPC_PCNT_SN, | ||
262 | TILE_OPC_RL, | ||
263 | TILE_OPC_RL_SN, | ||
264 | TILE_OPC_RLI, | ||
265 | TILE_OPC_RLI_SN, | ||
266 | TILE_OPC_S1A, | ||
267 | TILE_OPC_S1A_SN, | ||
268 | TILE_OPC_S2A, | ||
269 | TILE_OPC_S2A_SN, | ||
270 | TILE_OPC_S3A, | ||
271 | TILE_OPC_S3A_SN, | ||
272 | TILE_OPC_SADAB_U, | ||
273 | TILE_OPC_SADAB_U_SN, | ||
274 | TILE_OPC_SADAH, | ||
275 | TILE_OPC_SADAH_SN, | ||
276 | TILE_OPC_SADAH_U, | ||
277 | TILE_OPC_SADAH_U_SN, | ||
278 | TILE_OPC_SADB_U, | ||
279 | TILE_OPC_SADB_U_SN, | ||
280 | TILE_OPC_SADH, | ||
281 | TILE_OPC_SADH_SN, | ||
282 | TILE_OPC_SADH_U, | ||
283 | TILE_OPC_SADH_U_SN, | ||
284 | TILE_OPC_SB, | ||
285 | TILE_OPC_SBADD, | ||
286 | TILE_OPC_SEQ, | ||
287 | TILE_OPC_SEQ_SN, | ||
288 | TILE_OPC_SEQB, | ||
289 | TILE_OPC_SEQB_SN, | ||
290 | TILE_OPC_SEQH, | ||
291 | TILE_OPC_SEQH_SN, | ||
292 | TILE_OPC_SEQI, | ||
293 | TILE_OPC_SEQI_SN, | ||
294 | TILE_OPC_SEQIB, | ||
295 | TILE_OPC_SEQIB_SN, | ||
296 | TILE_OPC_SEQIH, | ||
297 | TILE_OPC_SEQIH_SN, | ||
298 | TILE_OPC_SH, | ||
299 | TILE_OPC_SHADD, | ||
300 | TILE_OPC_SHL, | ||
301 | TILE_OPC_SHL_SN, | ||
302 | TILE_OPC_SHLB, | ||
303 | TILE_OPC_SHLB_SN, | ||
304 | TILE_OPC_SHLH, | ||
305 | TILE_OPC_SHLH_SN, | ||
306 | TILE_OPC_SHLI, | ||
307 | TILE_OPC_SHLI_SN, | ||
308 | TILE_OPC_SHLIB, | ||
309 | TILE_OPC_SHLIB_SN, | ||
310 | TILE_OPC_SHLIH, | ||
311 | TILE_OPC_SHLIH_SN, | ||
312 | TILE_OPC_SHR, | ||
313 | TILE_OPC_SHR_SN, | ||
314 | TILE_OPC_SHRB, | ||
315 | TILE_OPC_SHRB_SN, | ||
316 | TILE_OPC_SHRH, | ||
317 | TILE_OPC_SHRH_SN, | ||
318 | TILE_OPC_SHRI, | ||
319 | TILE_OPC_SHRI_SN, | ||
320 | TILE_OPC_SHRIB, | ||
321 | TILE_OPC_SHRIB_SN, | ||
322 | TILE_OPC_SHRIH, | ||
323 | TILE_OPC_SHRIH_SN, | ||
324 | TILE_OPC_SLT, | ||
325 | TILE_OPC_SLT_SN, | ||
326 | TILE_OPC_SLT_U, | ||
327 | TILE_OPC_SLT_U_SN, | ||
328 | TILE_OPC_SLTB, | ||
329 | TILE_OPC_SLTB_SN, | ||
330 | TILE_OPC_SLTB_U, | ||
331 | TILE_OPC_SLTB_U_SN, | ||
332 | TILE_OPC_SLTE, | ||
333 | TILE_OPC_SLTE_SN, | ||
334 | TILE_OPC_SLTE_U, | ||
335 | TILE_OPC_SLTE_U_SN, | ||
336 | TILE_OPC_SLTEB, | ||
337 | TILE_OPC_SLTEB_SN, | ||
338 | TILE_OPC_SLTEB_U, | ||
339 | TILE_OPC_SLTEB_U_SN, | ||
340 | TILE_OPC_SLTEH, | ||
341 | TILE_OPC_SLTEH_SN, | ||
342 | TILE_OPC_SLTEH_U, | ||
343 | TILE_OPC_SLTEH_U_SN, | ||
344 | TILE_OPC_SLTH, | ||
345 | TILE_OPC_SLTH_SN, | ||
346 | TILE_OPC_SLTH_U, | ||
347 | TILE_OPC_SLTH_U_SN, | ||
348 | TILE_OPC_SLTI, | ||
349 | TILE_OPC_SLTI_SN, | ||
350 | TILE_OPC_SLTI_U, | ||
351 | TILE_OPC_SLTI_U_SN, | ||
352 | TILE_OPC_SLTIB, | ||
353 | TILE_OPC_SLTIB_SN, | ||
354 | TILE_OPC_SLTIB_U, | ||
355 | TILE_OPC_SLTIB_U_SN, | ||
356 | TILE_OPC_SLTIH, | ||
357 | TILE_OPC_SLTIH_SN, | ||
358 | TILE_OPC_SLTIH_U, | ||
359 | TILE_OPC_SLTIH_U_SN, | ||
360 | TILE_OPC_SNE, | ||
361 | TILE_OPC_SNE_SN, | ||
362 | TILE_OPC_SNEB, | ||
363 | TILE_OPC_SNEB_SN, | ||
364 | TILE_OPC_SNEH, | ||
365 | TILE_OPC_SNEH_SN, | ||
366 | TILE_OPC_SRA, | ||
367 | TILE_OPC_SRA_SN, | ||
368 | TILE_OPC_SRAB, | ||
369 | TILE_OPC_SRAB_SN, | ||
370 | TILE_OPC_SRAH, | ||
371 | TILE_OPC_SRAH_SN, | ||
372 | TILE_OPC_SRAI, | ||
373 | TILE_OPC_SRAI_SN, | ||
374 | TILE_OPC_SRAIB, | ||
375 | TILE_OPC_SRAIB_SN, | ||
376 | TILE_OPC_SRAIH, | ||
377 | TILE_OPC_SRAIH_SN, | ||
378 | TILE_OPC_SUB, | ||
379 | TILE_OPC_SUB_SN, | ||
380 | TILE_OPC_SUBB, | ||
381 | TILE_OPC_SUBB_SN, | ||
382 | TILE_OPC_SUBBS_U, | ||
383 | TILE_OPC_SUBBS_U_SN, | ||
384 | TILE_OPC_SUBH, | ||
385 | TILE_OPC_SUBH_SN, | ||
386 | TILE_OPC_SUBHS, | ||
387 | TILE_OPC_SUBHS_SN, | ||
388 | TILE_OPC_SUBS, | ||
389 | TILE_OPC_SUBS_SN, | ||
390 | TILE_OPC_SW, | ||
391 | TILE_OPC_SWADD, | ||
392 | TILE_OPC_SWINT0, | ||
393 | TILE_OPC_SWINT1, | ||
394 | TILE_OPC_SWINT2, | ||
395 | TILE_OPC_SWINT3, | ||
396 | TILE_OPC_TBLIDXB0, | ||
397 | TILE_OPC_TBLIDXB0_SN, | ||
398 | TILE_OPC_TBLIDXB1, | ||
399 | TILE_OPC_TBLIDXB1_SN, | ||
400 | TILE_OPC_TBLIDXB2, | ||
401 | TILE_OPC_TBLIDXB2_SN, | ||
402 | TILE_OPC_TBLIDXB3, | ||
403 | TILE_OPC_TBLIDXB3_SN, | ||
404 | TILE_OPC_TNS, | ||
405 | TILE_OPC_TNS_SN, | ||
406 | TILE_OPC_WH64, | ||
407 | TILE_OPC_XOR, | ||
408 | TILE_OPC_XOR_SN, | ||
409 | TILE_OPC_XORI, | ||
410 | TILE_OPC_XORI_SN, | ||
411 | TILE_OPC_NONE | ||
412 | } tile_mnemonic; | ||
413 | |||
414 | /* 64-bit pattern for a { bpt ; nop } bundle. */ | ||
415 | #define TILE_BPT_BUNDLE 0x400b3cae70166000ULL | ||
416 | |||
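(Editor's sketch, not part of the commit: since a { bpt ; nop } breakpoint is the single fixed 64-bit pattern defined above, code that already holds a bundle can detect a breakpoint with a plain comparison. The helper name below is hypothetical.)

/* Illustrative only; assumes this header is in scope. */
static __inline int
bundle_is_breakpoint(tile_bundle_bits bundle)
{
        return bundle == TILE_BPT_BUNDLE;
}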
417 | |||
418 | #define TILE_ELF_MACHINE_CODE EM_TILEPRO | ||
419 | |||
420 | #define TILE_ELF_NAME "elf32-tilepro" | ||
421 | |||
422 | enum | ||
423 | { | ||
424 | TILE_SN_MAX_OPERANDS = 6 /* route */ | ||
425 | }; | ||
426 | |||
427 | typedef enum | ||
428 | { | ||
429 | TILE_SN_OPC_BZ, | ||
430 | TILE_SN_OPC_BNZ, | ||
431 | TILE_SN_OPC_JRR, | ||
432 | TILE_SN_OPC_FNOP, | ||
433 | TILE_SN_OPC_BLZ, | ||
434 | TILE_SN_OPC_NOP, | ||
435 | TILE_SN_OPC_MOVEI, | ||
436 | TILE_SN_OPC_MOVE, | ||
437 | TILE_SN_OPC_BGEZ, | ||
438 | TILE_SN_OPC_JR, | ||
439 | TILE_SN_OPC_BLEZ, | ||
440 | TILE_SN_OPC_BBNS, | ||
441 | TILE_SN_OPC_JALRR, | ||
442 | TILE_SN_OPC_BPT, | ||
443 | TILE_SN_OPC_JALR, | ||
444 | TILE_SN_OPC_SHR1, | ||
445 | TILE_SN_OPC_BGZ, | ||
446 | TILE_SN_OPC_BBS, | ||
447 | TILE_SN_OPC_SHL8II, | ||
448 | TILE_SN_OPC_ADDI, | ||
449 | TILE_SN_OPC_HALT, | ||
450 | TILE_SN_OPC_ROUTE, | ||
451 | TILE_SN_OPC_NONE | ||
452 | } tile_sn_mnemonic; | ||
453 | |||
454 | extern const unsigned char tile_sn_route_encode[6 * 6 * 6]; | ||
455 | extern const signed char tile_sn_route_decode[256][3]; | ||
456 | extern const char tile_sn_direction_names[6][5]; | ||
457 | extern const signed char tile_sn_dest_map[6][6]; | ||
458 | |||
459 | |||
460 | static __inline unsigned int | ||
461 | get_BrOff_SN(tile_bundle_bits num) | ||
462 | { | ||
463 | const unsigned int n = (unsigned int)num; | ||
464 | return (((n >> 0)) & 0x3ff); | ||
465 | } | ||
466 | |||
467 | static __inline unsigned int | ||
468 | get_BrOff_X1(tile_bundle_bits n) | ||
469 | { | ||
470 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
471 | (((unsigned int)(n >> 20)) & 0x00018000); | ||
472 | } | ||
473 | |||
474 | static __inline unsigned int | ||
475 | get_BrType_X1(tile_bundle_bits n) | ||
476 | { | ||
477 | return (((unsigned int)(n >> 31)) & 0xf); | ||
478 | } | ||
479 | |||
480 | static __inline unsigned int | ||
481 | get_Dest_Imm8_X1(tile_bundle_bits n) | ||
482 | { | ||
483 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
484 | (((unsigned int)(n >> 43)) & 0x000000c0); | ||
485 | } | ||
486 | |||
487 | static __inline unsigned int | ||
488 | get_Dest_SN(tile_bundle_bits num) | ||
489 | { | ||
490 | const unsigned int n = (unsigned int)num; | ||
491 | return (((n >> 2)) & 0x3); | ||
492 | } | ||
493 | |||
494 | static __inline unsigned int | ||
495 | get_Dest_X0(tile_bundle_bits num) | ||
496 | { | ||
497 | const unsigned int n = (unsigned int)num; | ||
498 | return (((n >> 0)) & 0x3f); | ||
499 | } | ||
500 | |||
501 | static __inline unsigned int | ||
502 | get_Dest_X1(tile_bundle_bits n) | ||
503 | { | ||
504 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
505 | } | ||
506 | |||
507 | static __inline unsigned int | ||
508 | get_Dest_Y0(tile_bundle_bits num) | ||
509 | { | ||
510 | const unsigned int n = (unsigned int)num; | ||
511 | return (((n >> 0)) & 0x3f); | ||
512 | } | ||
513 | |||
514 | static __inline unsigned int | ||
515 | get_Dest_Y1(tile_bundle_bits n) | ||
516 | { | ||
517 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
518 | } | ||
519 | |||
520 | static __inline unsigned int | ||
521 | get_Imm16_X0(tile_bundle_bits num) | ||
522 | { | ||
523 | const unsigned int n = (unsigned int)num; | ||
524 | return (((n >> 12)) & 0xffff); | ||
525 | } | ||
526 | |||
527 | static __inline unsigned int | ||
528 | get_Imm16_X1(tile_bundle_bits n) | ||
529 | { | ||
530 | return (((unsigned int)(n >> 43)) & 0xffff); | ||
531 | } | ||
532 | |||
533 | static __inline unsigned int | ||
534 | get_Imm8_SN(tile_bundle_bits num) | ||
535 | { | ||
536 | const unsigned int n = (unsigned int)num; | ||
537 | return (((n >> 0)) & 0xff); | ||
538 | } | ||
539 | |||
540 | static __inline unsigned int | ||
541 | get_Imm8_X0(tile_bundle_bits num) | ||
542 | { | ||
543 | const unsigned int n = (unsigned int)num; | ||
544 | return (((n >> 12)) & 0xff); | ||
545 | } | ||
546 | |||
547 | static __inline unsigned int | ||
548 | get_Imm8_X1(tile_bundle_bits n) | ||
549 | { | ||
550 | return (((unsigned int)(n >> 43)) & 0xff); | ||
551 | } | ||
552 | |||
553 | static __inline unsigned int | ||
554 | get_Imm8_Y0(tile_bundle_bits num) | ||
555 | { | ||
556 | const unsigned int n = (unsigned int)num; | ||
557 | return (((n >> 12)) & 0xff); | ||
558 | } | ||
559 | |||
560 | static __inline unsigned int | ||
561 | get_Imm8_Y1(tile_bundle_bits n) | ||
562 | { | ||
563 | return (((unsigned int)(n >> 43)) & 0xff); | ||
564 | } | ||
565 | |||
566 | static __inline unsigned int | ||
567 | get_ImmOpcodeExtension_X0(tile_bundle_bits num) | ||
568 | { | ||
569 | const unsigned int n = (unsigned int)num; | ||
570 | return (((n >> 20)) & 0x7f); | ||
571 | } | ||
572 | |||
573 | static __inline unsigned int | ||
574 | get_ImmOpcodeExtension_X1(tile_bundle_bits n) | ||
575 | { | ||
576 | return (((unsigned int)(n >> 51)) & 0x7f); | ||
577 | } | ||
578 | |||
579 | static __inline unsigned int | ||
580 | get_ImmRROpcodeExtension_SN(tile_bundle_bits num) | ||
581 | { | ||
582 | const unsigned int n = (unsigned int)num; | ||
583 | return (((n >> 8)) & 0x3); | ||
584 | } | ||
585 | |||
586 | static __inline unsigned int | ||
587 | get_JOffLong_X1(tile_bundle_bits n) | ||
588 | { | ||
589 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
590 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
591 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
592 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
593 | (((unsigned int)(n >> 31)) & 0x18000000); | ||
594 | } | ||
595 | |||
596 | static __inline unsigned int | ||
597 | get_JOff_X1(tile_bundle_bits n) | ||
598 | { | ||
599 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
600 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
601 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
602 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
603 | (((unsigned int)(n >> 31)) & 0x08000000); | ||
604 | } | ||
605 | |||
606 | static __inline unsigned int | ||
607 | get_MF_Imm15_X1(tile_bundle_bits n) | ||
608 | { | ||
609 | return (((unsigned int)(n >> 37)) & 0x00003fff) | | ||
610 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
611 | } | ||
612 | |||
613 | static __inline unsigned int | ||
614 | get_MMEnd_X0(tile_bundle_bits num) | ||
615 | { | ||
616 | const unsigned int n = (unsigned int)num; | ||
617 | return (((n >> 18)) & 0x1f); | ||
618 | } | ||
619 | |||
620 | static __inline unsigned int | ||
621 | get_MMEnd_X1(tile_bundle_bits n) | ||
622 | { | ||
623 | return (((unsigned int)(n >> 49)) & 0x1f); | ||
624 | } | ||
625 | |||
626 | static __inline unsigned int | ||
627 | get_MMStart_X0(tile_bundle_bits num) | ||
628 | { | ||
629 | const unsigned int n = (unsigned int)num; | ||
630 | return (((n >> 23)) & 0x1f); | ||
631 | } | ||
632 | |||
633 | static __inline unsigned int | ||
634 | get_MMStart_X1(tile_bundle_bits n) | ||
635 | { | ||
636 | return (((unsigned int)(n >> 54)) & 0x1f); | ||
637 | } | ||
638 | |||
639 | static __inline unsigned int | ||
640 | get_MT_Imm15_X1(tile_bundle_bits n) | ||
641 | { | ||
642 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
643 | (((unsigned int)(n >> 37)) & 0x00003fc0) | | ||
644 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
645 | } | ||
646 | |||
647 | static __inline unsigned int | ||
648 | get_Mode(tile_bundle_bits n) | ||
649 | { | ||
650 | return (((unsigned int)(n >> 63)) & 0x1); | ||
651 | } | ||
652 | |||
653 | static __inline unsigned int | ||
654 | get_NoRegOpcodeExtension_SN(tile_bundle_bits num) | ||
655 | { | ||
656 | const unsigned int n = (unsigned int)num; | ||
657 | return (((n >> 0)) & 0xf); | ||
658 | } | ||
659 | |||
660 | static __inline unsigned int | ||
661 | get_Opcode_SN(tile_bundle_bits num) | ||
662 | { | ||
663 | const unsigned int n = (unsigned int)num; | ||
664 | return (((n >> 10)) & 0x3f); | ||
665 | } | ||
666 | |||
667 | static __inline unsigned int | ||
668 | get_Opcode_X0(tile_bundle_bits num) | ||
669 | { | ||
670 | const unsigned int n = (unsigned int)num; | ||
671 | return (((n >> 28)) & 0x7); | ||
672 | } | ||
673 | |||
674 | static __inline unsigned int | ||
675 | get_Opcode_X1(tile_bundle_bits n) | ||
676 | { | ||
677 | return (((unsigned int)(n >> 59)) & 0xf); | ||
678 | } | ||
679 | |||
680 | static __inline unsigned int | ||
681 | get_Opcode_Y0(tile_bundle_bits num) | ||
682 | { | ||
683 | const unsigned int n = (unsigned int)num; | ||
684 | return (((n >> 27)) & 0xf); | ||
685 | } | ||
686 | |||
687 | static __inline unsigned int | ||
688 | get_Opcode_Y1(tile_bundle_bits n) | ||
689 | { | ||
690 | return (((unsigned int)(n >> 59)) & 0xf); | ||
691 | } | ||
692 | |||
693 | static __inline unsigned int | ||
694 | get_Opcode_Y2(tile_bundle_bits n) | ||
695 | { | ||
696 | return (((unsigned int)(n >> 56)) & 0x7); | ||
697 | } | ||
698 | |||
699 | static __inline unsigned int | ||
700 | get_RROpcodeExtension_SN(tile_bundle_bits num) | ||
701 | { | ||
702 | const unsigned int n = (unsigned int)num; | ||
703 | return (((n >> 4)) & 0xf); | ||
704 | } | ||
705 | |||
706 | static __inline unsigned int | ||
707 | get_RRROpcodeExtension_X0(tile_bundle_bits num) | ||
708 | { | ||
709 | const unsigned int n = (unsigned int)num; | ||
710 | return (((n >> 18)) & 0x1ff); | ||
711 | } | ||
712 | |||
713 | static __inline unsigned int | ||
714 | get_RRROpcodeExtension_X1(tile_bundle_bits n) | ||
715 | { | ||
716 | return (((unsigned int)(n >> 49)) & 0x1ff); | ||
717 | } | ||
718 | |||
719 | static __inline unsigned int | ||
720 | get_RRROpcodeExtension_Y0(tile_bundle_bits num) | ||
721 | { | ||
722 | const unsigned int n = (unsigned int)num; | ||
723 | return (((n >> 18)) & 0x3); | ||
724 | } | ||
725 | |||
726 | static __inline unsigned int | ||
727 | get_RRROpcodeExtension_Y1(tile_bundle_bits n) | ||
728 | { | ||
729 | return (((unsigned int)(n >> 49)) & 0x3); | ||
730 | } | ||
731 | |||
732 | static __inline unsigned int | ||
733 | get_RouteOpcodeExtension_SN(tile_bundle_bits num) | ||
734 | { | ||
735 | const unsigned int n = (unsigned int)num; | ||
736 | return (((n >> 0)) & 0x3ff); | ||
737 | } | ||
738 | |||
739 | static __inline unsigned int | ||
740 | get_S_X0(tile_bundle_bits num) | ||
741 | { | ||
742 | const unsigned int n = (unsigned int)num; | ||
743 | return (((n >> 27)) & 0x1); | ||
744 | } | ||
745 | |||
746 | static __inline unsigned int | ||
747 | get_S_X1(tile_bundle_bits n) | ||
748 | { | ||
749 | return (((unsigned int)(n >> 58)) & 0x1); | ||
750 | } | ||
751 | |||
752 | static __inline unsigned int | ||
753 | get_ShAmt_X0(tile_bundle_bits num) | ||
754 | { | ||
755 | const unsigned int n = (unsigned int)num; | ||
756 | return (((n >> 12)) & 0x1f); | ||
757 | } | ||
758 | |||
759 | static __inline unsigned int | ||
760 | get_ShAmt_X1(tile_bundle_bits n) | ||
761 | { | ||
762 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
763 | } | ||
764 | |||
765 | static __inline unsigned int | ||
766 | get_ShAmt_Y0(tile_bundle_bits num) | ||
767 | { | ||
768 | const unsigned int n = (unsigned int)num; | ||
769 | return (((n >> 12)) & 0x1f); | ||
770 | } | ||
771 | |||
772 | static __inline unsigned int | ||
773 | get_ShAmt_Y1(tile_bundle_bits n) | ||
774 | { | ||
775 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
776 | } | ||
777 | |||
778 | static __inline unsigned int | ||
779 | get_SrcA_X0(tile_bundle_bits num) | ||
780 | { | ||
781 | const unsigned int n = (unsigned int)num; | ||
782 | return (((n >> 6)) & 0x3f); | ||
783 | } | ||
784 | |||
785 | static __inline unsigned int | ||
786 | get_SrcA_X1(tile_bundle_bits n) | ||
787 | { | ||
788 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
789 | } | ||
790 | |||
791 | static __inline unsigned int | ||
792 | get_SrcA_Y0(tile_bundle_bits num) | ||
793 | { | ||
794 | const unsigned int n = (unsigned int)num; | ||
795 | return (((n >> 6)) & 0x3f); | ||
796 | } | ||
797 | |||
798 | static __inline unsigned int | ||
799 | get_SrcA_Y1(tile_bundle_bits n) | ||
800 | { | ||
801 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
802 | } | ||
803 | |||
804 | static __inline unsigned int | ||
805 | get_SrcA_Y2(tile_bundle_bits n) | ||
806 | { | ||
807 | return (((n >> 26)) & 0x00000001) | | ||
808 | (((unsigned int)(n >> 50)) & 0x0000003e); | ||
809 | } | ||
810 | |||
811 | static __inline unsigned int | ||
812 | get_SrcBDest_Y2(tile_bundle_bits num) | ||
813 | { | ||
814 | const unsigned int n = (unsigned int)num; | ||
815 | return (((n >> 20)) & 0x3f); | ||
816 | } | ||
817 | |||
818 | static __inline unsigned int | ||
819 | get_SrcB_X0(tile_bundle_bits num) | ||
820 | { | ||
821 | const unsigned int n = (unsigned int)num; | ||
822 | return (((n >> 12)) & 0x3f); | ||
823 | } | ||
824 | |||
825 | static __inline unsigned int | ||
826 | get_SrcB_X1(tile_bundle_bits n) | ||
827 | { | ||
828 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
829 | } | ||
830 | |||
831 | static __inline unsigned int | ||
832 | get_SrcB_Y0(tile_bundle_bits num) | ||
833 | { | ||
834 | const unsigned int n = (unsigned int)num; | ||
835 | return (((n >> 12)) & 0x3f); | ||
836 | } | ||
837 | |||
838 | static __inline unsigned int | ||
839 | get_SrcB_Y1(tile_bundle_bits n) | ||
840 | { | ||
841 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
842 | } | ||
843 | |||
844 | static __inline unsigned int | ||
845 | get_Src_SN(tile_bundle_bits num) | ||
846 | { | ||
847 | const unsigned int n = (unsigned int)num; | ||
848 | return (((n >> 0)) & 0x3); | ||
849 | } | ||
850 | |||
851 | static __inline unsigned int | ||
852 | get_UnOpcodeExtension_X0(tile_bundle_bits num) | ||
853 | { | ||
854 | const unsigned int n = (unsigned int)num; | ||
855 | return (((n >> 12)) & 0x1f); | ||
856 | } | ||
857 | |||
858 | static __inline unsigned int | ||
859 | get_UnOpcodeExtension_X1(tile_bundle_bits n) | ||
860 | { | ||
861 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
862 | } | ||
863 | |||
864 | static __inline unsigned int | ||
865 | get_UnOpcodeExtension_Y0(tile_bundle_bits num) | ||
866 | { | ||
867 | const unsigned int n = (unsigned int)num; | ||
868 | return (((n >> 12)) & 0x1f); | ||
869 | } | ||
870 | |||
871 | static __inline unsigned int | ||
872 | get_UnOpcodeExtension_Y1(tile_bundle_bits n) | ||
873 | { | ||
874 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
875 | } | ||
876 | |||
877 | static __inline unsigned int | ||
878 | get_UnShOpcodeExtension_X0(tile_bundle_bits num) | ||
879 | { | ||
880 | const unsigned int n = (unsigned int)num; | ||
881 | return (((n >> 17)) & 0x3ff); | ||
882 | } | ||
883 | |||
884 | static __inline unsigned int | ||
885 | get_UnShOpcodeExtension_X1(tile_bundle_bits n) | ||
886 | { | ||
887 | return (((unsigned int)(n >> 48)) & 0x3ff); | ||
888 | } | ||
889 | |||
890 | static __inline unsigned int | ||
891 | get_UnShOpcodeExtension_Y0(tile_bundle_bits num) | ||
892 | { | ||
893 | const unsigned int n = (unsigned int)num; | ||
894 | return (((n >> 17)) & 0x7); | ||
895 | } | ||
896 | |||
897 | static __inline unsigned int | ||
898 | get_UnShOpcodeExtension_Y1(tile_bundle_bits n) | ||
899 | { | ||
900 | return (((unsigned int)(n >> 48)) & 0x7); | ||
901 | } | ||
902 | |||
903 | |||
904 | static __inline int | ||
905 | sign_extend(int n, int num_bits) | ||
906 | { | ||
907 | int shift = (int)(sizeof(int) * 8 - num_bits); | ||
908 | return (n << shift) >> shift; | ||
909 | } | ||
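(Editor's sketch, not part of the commit: the get_* accessors above return raw, zero-extended field bits, and sign_extend() recovers the signedness; a raw Imm8_X1 value of 0xff, for example, decodes to -1. The helper name below is hypothetical.)

/* Sketch: decode the signed 8-bit immediate from the X1 slot. */
static __inline int
decode_imm8_x1(tile_bundle_bits bundle)
{
        return sign_extend((int)get_Imm8_X1(bundle), 8);
}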
910 | |||
911 | |||
912 | |||
913 | static __inline tile_bundle_bits | ||
914 | create_BrOff_SN(int num) | ||
915 | { | ||
916 | const unsigned int n = (unsigned int)num; | ||
917 | return ((n & 0x3ff) << 0); | ||
918 | } | ||
919 | |||
920 | static __inline tile_bundle_bits | ||
921 | create_BrOff_X1(int num) | ||
922 | { | ||
923 | const unsigned int n = (unsigned int)num; | ||
924 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
925 | (((tile_bundle_bits)(n & 0x00018000)) << 20); | ||
926 | } | ||
927 | |||
928 | static __inline tile_bundle_bits | ||
929 | create_BrType_X1(int num) | ||
930 | { | ||
931 | const unsigned int n = (unsigned int)num; | ||
932 | return (((tile_bundle_bits)(n & 0xf)) << 31); | ||
933 | } | ||
934 | |||
935 | static __inline tile_bundle_bits | ||
936 | create_Dest_Imm8_X1(int num) | ||
937 | { | ||
938 | const unsigned int n = (unsigned int)num; | ||
939 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
940 | (((tile_bundle_bits)(n & 0x000000c0)) << 43); | ||
941 | } | ||
942 | |||
943 | static __inline tile_bundle_bits | ||
944 | create_Dest_SN(int num) | ||
945 | { | ||
946 | const unsigned int n = (unsigned int)num; | ||
947 | return ((n & 0x3) << 2); | ||
948 | } | ||
949 | |||
950 | static __inline tile_bundle_bits | ||
951 | create_Dest_X0(int num) | ||
952 | { | ||
953 | const unsigned int n = (unsigned int)num; | ||
954 | return ((n & 0x3f) << 0); | ||
955 | } | ||
956 | |||
957 | static __inline tile_bundle_bits | ||
958 | create_Dest_X1(int num) | ||
959 | { | ||
960 | const unsigned int n = (unsigned int)num; | ||
961 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
962 | } | ||
963 | |||
964 | static __inline tile_bundle_bits | ||
965 | create_Dest_Y0(int num) | ||
966 | { | ||
967 | const unsigned int n = (unsigned int)num; | ||
968 | return ((n & 0x3f) << 0); | ||
969 | } | ||
970 | |||
971 | static __inline tile_bundle_bits | ||
972 | create_Dest_Y1(int num) | ||
973 | { | ||
974 | const unsigned int n = (unsigned int)num; | ||
975 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
976 | } | ||
977 | |||
978 | static __inline tile_bundle_bits | ||
979 | create_Imm16_X0(int num) | ||
980 | { | ||
981 | const unsigned int n = (unsigned int)num; | ||
982 | return ((n & 0xffff) << 12); | ||
983 | } | ||
984 | |||
985 | static __inline tile_bundle_bits | ||
986 | create_Imm16_X1(int num) | ||
987 | { | ||
988 | const unsigned int n = (unsigned int)num; | ||
989 | return (((tile_bundle_bits)(n & 0xffff)) << 43); | ||
990 | } | ||
991 | |||
992 | static __inline tile_bundle_bits | ||
993 | create_Imm8_SN(int num) | ||
994 | { | ||
995 | const unsigned int n = (unsigned int)num; | ||
996 | return ((n & 0xff) << 0); | ||
997 | } | ||
998 | |||
999 | static __inline tile_bundle_bits | ||
1000 | create_Imm8_X0(int num) | ||
1001 | { | ||
1002 | const unsigned int n = (unsigned int)num; | ||
1003 | return ((n & 0xff) << 12); | ||
1004 | } | ||
1005 | |||
1006 | static __inline tile_bundle_bits | ||
1007 | create_Imm8_X1(int num) | ||
1008 | { | ||
1009 | const unsigned int n = (unsigned int)num; | ||
1010 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
1011 | } | ||
1012 | |||
1013 | static __inline tile_bundle_bits | ||
1014 | create_Imm8_Y0(int num) | ||
1015 | { | ||
1016 | const unsigned int n = (unsigned int)num; | ||
1017 | return ((n & 0xff) << 12); | ||
1018 | } | ||
1019 | |||
1020 | static __inline tile_bundle_bits | ||
1021 | create_Imm8_Y1(int num) | ||
1022 | { | ||
1023 | const unsigned int n = (unsigned int)num; | ||
1024 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
1025 | } | ||
1026 | |||
1027 | static __inline tile_bundle_bits | ||
1028 | create_ImmOpcodeExtension_X0(int num) | ||
1029 | { | ||
1030 | const unsigned int n = (unsigned int)num; | ||
1031 | return ((n & 0x7f) << 20); | ||
1032 | } | ||
1033 | |||
1034 | static __inline tile_bundle_bits | ||
1035 | create_ImmOpcodeExtension_X1(int num) | ||
1036 | { | ||
1037 | const unsigned int n = (unsigned int)num; | ||
1038 | return (((tile_bundle_bits)(n & 0x7f)) << 51); | ||
1039 | } | ||
1040 | |||
1041 | static __inline tile_bundle_bits | ||
1042 | create_ImmRROpcodeExtension_SN(int num) | ||
1043 | { | ||
1044 | const unsigned int n = (unsigned int)num; | ||
1045 | return ((n & 0x3) << 8); | ||
1046 | } | ||
1047 | |||
1048 | static __inline tile_bundle_bits | ||
1049 | create_JOffLong_X1(int num) | ||
1050 | { | ||
1051 | const unsigned int n = (unsigned int)num; | ||
1052 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1053 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1054 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1055 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1056 | (((tile_bundle_bits)(n & 0x18000000)) << 31); | ||
1057 | } | ||
1058 | |||
1059 | static __inline tile_bundle_bits | ||
1060 | create_JOff_X1(int num) | ||
1061 | { | ||
1062 | const unsigned int n = (unsigned int)num; | ||
1063 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1064 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1065 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1066 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1067 | (((tile_bundle_bits)(n & 0x08000000)) << 31); | ||
1068 | } | ||
1069 | |||
1070 | static __inline tile_bundle_bits | ||
1071 | create_MF_Imm15_X1(int num) | ||
1072 | { | ||
1073 | const unsigned int n = (unsigned int)num; | ||
1074 | return (((tile_bundle_bits)(n & 0x00003fff)) << 37) | | ||
1075 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1076 | } | ||
1077 | |||
1078 | static __inline tile_bundle_bits | ||
1079 | create_MMEnd_X0(int num) | ||
1080 | { | ||
1081 | const unsigned int n = (unsigned int)num; | ||
1082 | return ((n & 0x1f) << 18); | ||
1083 | } | ||
1084 | |||
1085 | static __inline tile_bundle_bits | ||
1086 | create_MMEnd_X1(int num) | ||
1087 | { | ||
1088 | const unsigned int n = (unsigned int)num; | ||
1089 | return (((tile_bundle_bits)(n & 0x1f)) << 49); | ||
1090 | } | ||
1091 | |||
1092 | static __inline tile_bundle_bits | ||
1093 | create_MMStart_X0(int num) | ||
1094 | { | ||
1095 | const unsigned int n = (unsigned int)num; | ||
1096 | return ((n & 0x1f) << 23); | ||
1097 | } | ||
1098 | |||
1099 | static __inline tile_bundle_bits | ||
1100 | create_MMStart_X1(int num) | ||
1101 | { | ||
1102 | const unsigned int n = (unsigned int)num; | ||
1103 | return (((tile_bundle_bits)(n & 0x1f)) << 54); | ||
1104 | } | ||
1105 | |||
1106 | static __inline tile_bundle_bits | ||
1107 | create_MT_Imm15_X1(int num) | ||
1108 | { | ||
1109 | const unsigned int n = (unsigned int)num; | ||
1110 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
1111 | (((tile_bundle_bits)(n & 0x00003fc0)) << 37) | | ||
1112 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1113 | } | ||
1114 | |||
1115 | static __inline tile_bundle_bits | ||
1116 | create_Mode(int num) | ||
1117 | { | ||
1118 | const unsigned int n = (unsigned int)num; | ||
1119 | return (((tile_bundle_bits)(n & 0x1)) << 63); | ||
1120 | } | ||
1121 | |||
1122 | static __inline tile_bundle_bits | ||
1123 | create_NoRegOpcodeExtension_SN(int num) | ||
1124 | { | ||
1125 | const unsigned int n = (unsigned int)num; | ||
1126 | return ((n & 0xf) << 0); | ||
1127 | } | ||
1128 | |||
1129 | static __inline tile_bundle_bits | ||
1130 | create_Opcode_SN(int num) | ||
1131 | { | ||
1132 | const unsigned int n = (unsigned int)num; | ||
1133 | return ((n & 0x3f) << 10); | ||
1134 | } | ||
1135 | |||
1136 | static __inline tile_bundle_bits | ||
1137 | create_Opcode_X0(int num) | ||
1138 | { | ||
1139 | const unsigned int n = (unsigned int)num; | ||
1140 | return ((n & 0x7) << 28); | ||
1141 | } | ||
1142 | |||
1143 | static __inline tile_bundle_bits | ||
1144 | create_Opcode_X1(int num) | ||
1145 | { | ||
1146 | const unsigned int n = (unsigned int)num; | ||
1147 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1148 | } | ||
1149 | |||
1150 | static __inline tile_bundle_bits | ||
1151 | create_Opcode_Y0(int num) | ||
1152 | { | ||
1153 | const unsigned int n = (unsigned int)num; | ||
1154 | return ((n & 0xf) << 27); | ||
1155 | } | ||
1156 | |||
1157 | static __inline tile_bundle_bits | ||
1158 | create_Opcode_Y1(int num) | ||
1159 | { | ||
1160 | const unsigned int n = (unsigned int)num; | ||
1161 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1162 | } | ||
1163 | |||
1164 | static __inline tile_bundle_bits | ||
1165 | create_Opcode_Y2(int num) | ||
1166 | { | ||
1167 | const unsigned int n = (unsigned int)num; | ||
1168 | return (((tile_bundle_bits)(n & 0x7)) << 56); | ||
1169 | } | ||
1170 | |||
1171 | static __inline tile_bundle_bits | ||
1172 | create_RROpcodeExtension_SN(int num) | ||
1173 | { | ||
1174 | const unsigned int n = (unsigned int)num; | ||
1175 | return ((n & 0xf) << 4); | ||
1176 | } | ||
1177 | |||
1178 | static __inline tile_bundle_bits | ||
1179 | create_RRROpcodeExtension_X0(int num) | ||
1180 | { | ||
1181 | const unsigned int n = (unsigned int)num; | ||
1182 | return ((n & 0x1ff) << 18); | ||
1183 | } | ||
1184 | |||
1185 | static __inline tile_bundle_bits | ||
1186 | create_RRROpcodeExtension_X1(int num) | ||
1187 | { | ||
1188 | const unsigned int n = (unsigned int)num; | ||
1189 | return (((tile_bundle_bits)(n & 0x1ff)) << 49); | ||
1190 | } | ||
1191 | |||
1192 | static __inline tile_bundle_bits | ||
1193 | create_RRROpcodeExtension_Y0(int num) | ||
1194 | { | ||
1195 | const unsigned int n = (unsigned int)num; | ||
1196 | return ((n & 0x3) << 18); | ||
1197 | } | ||
1198 | |||
1199 | static __inline tile_bundle_bits | ||
1200 | create_RRROpcodeExtension_Y1(int num) | ||
1201 | { | ||
1202 | const unsigned int n = (unsigned int)num; | ||
1203 | return (((tile_bundle_bits)(n & 0x3)) << 49); | ||
1204 | } | ||
1205 | |||
1206 | static __inline tile_bundle_bits | ||
1207 | create_RouteOpcodeExtension_SN(int num) | ||
1208 | { | ||
1209 | const unsigned int n = (unsigned int)num; | ||
1210 | return ((n & 0x3ff) << 0); | ||
1211 | } | ||
1212 | |||
1213 | static __inline tile_bundle_bits | ||
1214 | create_S_X0(int num) | ||
1215 | { | ||
1216 | const unsigned int n = (unsigned int)num; | ||
1217 | return ((n & 0x1) << 27); | ||
1218 | } | ||
1219 | |||
1220 | static __inline tile_bundle_bits | ||
1221 | create_S_X1(int num) | ||
1222 | { | ||
1223 | const unsigned int n = (unsigned int)num; | ||
1224 | return (((tile_bundle_bits)(n & 0x1)) << 58); | ||
1225 | } | ||
1226 | |||
1227 | static __inline tile_bundle_bits | ||
1228 | create_ShAmt_X0(int num) | ||
1229 | { | ||
1230 | const unsigned int n = (unsigned int)num; | ||
1231 | return ((n & 0x1f) << 12); | ||
1232 | } | ||
1233 | |||
1234 | static __inline tile_bundle_bits | ||
1235 | create_ShAmt_X1(int num) | ||
1236 | { | ||
1237 | const unsigned int n = (unsigned int)num; | ||
1238 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1239 | } | ||
1240 | |||
1241 | static __inline tile_bundle_bits | ||
1242 | create_ShAmt_Y0(int num) | ||
1243 | { | ||
1244 | const unsigned int n = (unsigned int)num; | ||
1245 | return ((n & 0x1f) << 12); | ||
1246 | } | ||
1247 | |||
1248 | static __inline tile_bundle_bits | ||
1249 | create_ShAmt_Y1(int num) | ||
1250 | { | ||
1251 | const unsigned int n = (unsigned int)num; | ||
1252 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1253 | } | ||
1254 | |||
1255 | static __inline tile_bundle_bits | ||
1256 | create_SrcA_X0(int num) | ||
1257 | { | ||
1258 | const unsigned int n = (unsigned int)num; | ||
1259 | return ((n & 0x3f) << 6); | ||
1260 | } | ||
1261 | |||
1262 | static __inline tile_bundle_bits | ||
1263 | create_SrcA_X1(int num) | ||
1264 | { | ||
1265 | const unsigned int n = (unsigned int)num; | ||
1266 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1267 | } | ||
1268 | |||
1269 | static __inline tile_bundle_bits | ||
1270 | create_SrcA_Y0(int num) | ||
1271 | { | ||
1272 | const unsigned int n = (unsigned int)num; | ||
1273 | return ((n & 0x3f) << 6); | ||
1274 | } | ||
1275 | |||
1276 | static __inline tile_bundle_bits | ||
1277 | create_SrcA_Y1(int num) | ||
1278 | { | ||
1279 | const unsigned int n = (unsigned int)num; | ||
1280 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1281 | } | ||
1282 | |||
1283 | static __inline tile_bundle_bits | ||
1284 | create_SrcA_Y2(int num) | ||
1285 | { | ||
1286 | const unsigned int n = (unsigned int)num; | ||
1287 | return ((n & 0x00000001) << 26) | | ||
1288 | (((tile_bundle_bits)(n & 0x0000003e)) << 50); | ||
1289 | } | ||
1290 | |||
1291 | static __inline tile_bundle_bits | ||
1292 | create_SrcBDest_Y2(int num) | ||
1293 | { | ||
1294 | const unsigned int n = (unsigned int)num; | ||
1295 | return ((n & 0x3f) << 20); | ||
1296 | } | ||
1297 | |||
1298 | static __inline tile_bundle_bits | ||
1299 | create_SrcB_X0(int num) | ||
1300 | { | ||
1301 | const unsigned int n = (unsigned int)num; | ||
1302 | return ((n & 0x3f) << 12); | ||
1303 | } | ||
1304 | |||
1305 | static __inline tile_bundle_bits | ||
1306 | create_SrcB_X1(int num) | ||
1307 | { | ||
1308 | const unsigned int n = (unsigned int)num; | ||
1309 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1310 | } | ||
1311 | |||
1312 | static __inline tile_bundle_bits | ||
1313 | create_SrcB_Y0(int num) | ||
1314 | { | ||
1315 | const unsigned int n = (unsigned int)num; | ||
1316 | return ((n & 0x3f) << 12); | ||
1317 | } | ||
1318 | |||
1319 | static __inline tile_bundle_bits | ||
1320 | create_SrcB_Y1(int num) | ||
1321 | { | ||
1322 | const unsigned int n = (unsigned int)num; | ||
1323 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1324 | } | ||
1325 | |||
1326 | static __inline tile_bundle_bits | ||
1327 | create_Src_SN(int num) | ||
1328 | { | ||
1329 | const unsigned int n = (unsigned int)num; | ||
1330 | return ((n & 0x3) << 0); | ||
1331 | } | ||
1332 | |||
1333 | static __inline tile_bundle_bits | ||
1334 | create_UnOpcodeExtension_X0(int num) | ||
1335 | { | ||
1336 | const unsigned int n = (unsigned int)num; | ||
1337 | return ((n & 0x1f) << 12); | ||
1338 | } | ||
1339 | |||
1340 | static __inline tile_bundle_bits | ||
1341 | create_UnOpcodeExtension_X1(int num) | ||
1342 | { | ||
1343 | const unsigned int n = (unsigned int)num; | ||
1344 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1345 | } | ||
1346 | |||
1347 | static __inline tile_bundle_bits | ||
1348 | create_UnOpcodeExtension_Y0(int num) | ||
1349 | { | ||
1350 | const unsigned int n = (unsigned int)num; | ||
1351 | return ((n & 0x1f) << 12); | ||
1352 | } | ||
1353 | |||
1354 | static __inline tile_bundle_bits | ||
1355 | create_UnOpcodeExtension_Y1(int num) | ||
1356 | { | ||
1357 | const unsigned int n = (unsigned int)num; | ||
1358 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1359 | } | ||
1360 | |||
1361 | static __inline tile_bundle_bits | ||
1362 | create_UnShOpcodeExtension_X0(int num) | ||
1363 | { | ||
1364 | const unsigned int n = (unsigned int)num; | ||
1365 | return ((n & 0x3ff) << 17); | ||
1366 | } | ||
1367 | |||
1368 | static __inline tile_bundle_bits | ||
1369 | create_UnShOpcodeExtension_X1(int num) | ||
1370 | { | ||
1371 | const unsigned int n = (unsigned int)num; | ||
1372 | return (((tile_bundle_bits)(n & 0x3ff)) << 48); | ||
1373 | } | ||
1374 | |||
1375 | static __inline tile_bundle_bits | ||
1376 | create_UnShOpcodeExtension_Y0(int num) | ||
1377 | { | ||
1378 | const unsigned int n = (unsigned int)num; | ||
1379 | return ((n & 0x7) << 17); | ||
1380 | } | ||
1381 | |||
1382 | static __inline tile_bundle_bits | ||
1383 | create_UnShOpcodeExtension_Y1(int num) | ||
1384 | { | ||
1385 | const unsigned int n = (unsigned int)num; | ||
1386 | return (((tile_bundle_bits)(n & 0x7)) << 48); | ||
1387 | } | ||
1388 | |||
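(Editor's sketch, not part of the commit: each create_* helper is the inverse of the matching get_* helper above, masking a value and shifting it into its field so it can be ORed into a bundle. The round trip below packs three X1 register fields and reads them back; it is illustrative field packing only, not a complete, valid instruction.)

/* Sketch: pack X1-slot register fields and verify the round trip. */
static __inline int
pack_unpack_ok(void)
{
        tile_bundle_bits bits = 0;

        bits |= create_Dest_X1(5);   /* dest = r5, bits <31:36> */
        bits |= create_SrcA_X1(6);   /* srcA = r6, bits <37:42> */
        bits |= create_SrcB_X1(7);   /* srcB = r7, bits <43:48> */

        return get_Dest_X1(bits) == 5 &&
               get_SrcA_X1(bits) == 6 &&
               get_SrcB_X1(bits) == 7;   /* evaluates to 1 */
}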
1389 | |||
1390 | typedef unsigned short tile_sn_instruction_bits; | ||
1391 | |||
1392 | |||
1393 | typedef enum | ||
1394 | { | ||
1395 | TILE_PIPELINE_X0, | ||
1396 | TILE_PIPELINE_X1, | ||
1397 | TILE_PIPELINE_Y0, | ||
1398 | TILE_PIPELINE_Y1, | ||
1399 | TILE_PIPELINE_Y2, | ||
1400 | } tile_pipeline; | ||
1401 | |||
1402 | #define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1) | ||
1403 | |||
1404 | typedef enum | ||
1405 | { | ||
1406 | TILE_OP_TYPE_REGISTER, | ||
1407 | TILE_OP_TYPE_IMMEDIATE, | ||
1408 | TILE_OP_TYPE_ADDRESS, | ||
1409 | TILE_OP_TYPE_SPR | ||
1410 | } tile_operand_type; | ||
1411 | |||
1412 | /* This is the bit that determines if a bundle is in the Y encoding. */ | ||
1413 | #define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63) | ||
1414 | |||
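(Editor's sketch, not part of the commit: bit 63 is the Mode field read by get_Mode() above, so testing it against the mask just defined distinguishes the three-slot Y encoding from the two-slot X encoding. The helper name is hypothetical.)

/* Sketch: non-zero when the bundle uses the Y encoding. */
static __inline int
bundle_is_y_encoded(tile_bundle_bits bundle)
{
        return (bundle & TILE_BUNDLE_Y_ENCODING_MASK) != 0;
}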
1415 | enum | ||
1416 | { | ||
1417 | /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */ | ||
1418 | TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3, | ||
1419 | |||
1420 | /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */ | ||
1421 | TILE_NUM_PIPELINE_ENCODINGS = 5, | ||
1422 | |||
1423 | /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */ | ||
1424 | TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3, | ||
1425 | |||
1426 | /* Instructions take this many bytes. */ | ||
1427 | TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES, | ||
1428 | |||
1429 | /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */ | ||
1430 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, | ||
1431 | |||
1432 | /* Bundles should be aligned modulo this number of bytes. */ | ||
1433 | TILE_BUNDLE_ALIGNMENT_IN_BYTES = | ||
1434 | (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), | ||
1435 | |||
1436 | /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */ | ||
1437 | TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1, | ||
1438 | |||
1439 | /* Static network instructions take this many bytes. */ | ||
1440 | TILE_SN_INSTRUCTION_SIZE_IN_BYTES = | ||
1441 | (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES), | ||
1442 | |||
1443 | /* Number of registers (some are magic, such as network I/O). */ | ||
1444 | TILE_NUM_REGISTERS = 64, | ||
1445 | |||
1446 | /* Number of static network registers. */ | ||
1447 | TILE_NUM_SN_REGISTERS = 4 | ||
1448 | }; | ||
1449 | |||
1450 | |||
1451 | struct tile_operand | ||
1452 | { | ||
1453 | /* Is this operand a register, immediate or address? */ | ||
1454 | tile_operand_type type; | ||
1455 | |||
1456 | /* The default relocation type for this operand. */ | ||
1457 | signed int default_reloc : 16; | ||
1458 | |||
1459 | /* How many bits is this value? (used for range checking) */ | ||
1460 | unsigned int num_bits : 5; | ||
1461 | |||
1462 | /* Is the value signed? (used for range checking) */ | ||
1463 | unsigned int is_signed : 1; | ||
1464 | |||
1465 | /* Is this operand a source register? */ | ||
1466 | unsigned int is_src_reg : 1; | ||
1467 | |||
1468 | /* Is this operand written? (i.e. is it a destination register) */ | ||
1469 | unsigned int is_dest_reg : 1; | ||
1470 | |||
1471 | /* Is this operand PC-relative? */ | ||
1472 | unsigned int is_pc_relative : 1; | ||
1473 | |||
1474 | /* By how many bits do we right shift the value before inserting? */ | ||
1475 | unsigned int rightshift : 2; | ||
1476 | |||
1477 | /* Return the bits for this operand to be ORed into an existing bundle. */ | ||
1478 | tile_bundle_bits (*insert) (int op); | ||
1479 | |||
1480 | /* Extract this operand and return it. */ | ||
1481 | unsigned int (*extract) (tile_bundle_bits bundle); | ||
1482 | }; | ||
1483 | |||
1484 | |||
1485 | extern const struct tile_operand tile_operands[]; | ||
1486 | |||
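(Editor's sketch, not part of the commit: one plausible use of an operand descriptor, based only on the fields declared above. The extract callback pulls the raw field bits out of a bundle, and num_bits/is_signed drive sign extension; the rightshift and PC-relative adjustments are ignored here for brevity, and the helper name is hypothetical.)

/* Sketch: recover an operand's numeric value from a bundle. */
static __inline int
operand_value(const struct tile_operand *op, tile_bundle_bits bundle)
{
        int raw = (int)op->extract(bundle);

        return op->is_signed ? sign_extend(raw, op->num_bits) : raw;
}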
1487 | /* One finite-state machine per pipe for rapid instruction decoding. */ | ||
1488 | extern const unsigned short * const | ||
1489 | tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1490 | |||
1491 | |||
1492 | struct tile_opcode | ||
1493 | { | ||
1494 | /* The opcode mnemonic, e.g. "add" */ | ||
1495 | const char *name; | ||
1496 | |||
1497 | /* The enum value for this mnemonic. */ | ||
1498 | tile_mnemonic mnemonic; | ||
1499 | |||
1500 | /* A bit mask of which of the five pipes this instruction | ||
1501 | is compatible with: | ||
1502 | X0 0x01 | ||
1503 | X1 0x02 | ||
1504 | Y0 0x04 | ||
1505 | Y1 0x08 | ||
1506 | Y2 0x10 */ | ||
1507 | unsigned char pipes; | ||
1508 | |||
1509 | /* How many operands are there? */ | ||
1510 | unsigned char num_operands; | ||
1511 | |||
1512 | /* Which register does this write implicitly, or TREG_ZERO if none? */ | ||
1513 | unsigned char implicitly_written_register; | ||
1514 | |||
1515 | /* Can this be bundled with other instructions (almost always true). */ | ||
1516 | unsigned char can_bundle; | ||
1517 | |||
1518 | /* The description of the operands. Each of these is an | ||
1519 | * index into the tile_operands[] table. */ | ||
1520 | unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS]; | ||
1521 | |||
1522 | /* A mask of which bits have predefined values for each pipeline. | ||
1523 | * This is useful for disassembly. */ | ||
1524 | tile_bundle_bits fixed_bit_masks[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1525 | |||
1526 | /* For each bit set in fixed_bit_masks, what the value is for this | ||
1527 | * instruction. */ | ||
1528 | tile_bundle_bits fixed_bit_values[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1529 | }; | ||
1530 | |||
1531 | extern const struct tile_opcode tile_opcodes[]; | ||
1532 | |||
1533 | struct tile_sn_opcode | ||
1534 | { | ||
1535 | /* The opcode mnemonic, e.g. "add" */ | ||
1536 | const char *name; | ||
1537 | |||
1538 | /* The enum value for this mnemonic. */ | ||
1539 | tile_sn_mnemonic mnemonic; | ||
1540 | |||
1541 | /* How many operands are there? */ | ||
1542 | unsigned char num_operands; | ||
1543 | |||
1544 | /* The description of the operands. Each of these is an | ||
1545 | * index into the tile_operands[] table. */ | ||
1546 | unsigned char operands[TILE_SN_MAX_OPERANDS]; | ||
1547 | |||
1548 | /* A mask of which bits have predefined values. | ||
1549 | * This is useful for disassembly. */ | ||
1550 | tile_sn_instruction_bits fixed_bit_mask; | ||
1551 | |||
1552 | /* For each bit set in fixed_bit_mask, what its value is. */ | ||
1553 | tile_sn_instruction_bits fixed_bit_values; | ||
1554 | }; | ||
1555 | |||
1556 | extern const struct tile_sn_opcode tile_sn_opcodes[]; | ||
1557 | |||
1558 | /* Used for non-textual disassembly into structs. */ | ||
1559 | struct tile_decoded_instruction | ||
1560 | { | ||
1561 | const struct tile_opcode *opcode; | ||
1562 | const struct tile_operand *operands[TILE_MAX_OPERANDS]; | ||
1563 | int operand_values[TILE_MAX_OPERANDS]; | ||
1564 | }; | ||
1565 | |||
1566 | |||
1567 | /* Disassemble a bundle into a struct for machine processing. */ | ||
1568 | extern int parse_insn_tile(tile_bundle_bits bits, | ||
1569 | unsigned int pc, | ||
1570 | struct tile_decoded_instruction | ||
1571 | decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); | ||
1572 | |||
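(Editor's sketch, not part of the commit: parse_insn_tile() is implemented out of line, so this is only a usage sketch based on the declaration above; the return value is assumed here to be the number of decoded slots, and the helper name is hypothetical.)

/* Usage sketch; printk() would need <linux/kernel.h>. */
static __inline void
print_bundle_mnemonics(tile_bundle_bits bits, unsigned int pc)
{
        struct tile_decoded_instruction decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
        int i, n = parse_insn_tile(bits, pc, decoded);

        for (i = 0; i < n; i++)
                printk("%s\n", decoded[i].opcode->name);
}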
1573 | |||
1574 | /* Canonical names of all the registers. */ | ||
1575 | /* ISSUE: This table lives in "tile-dis.c" */ | ||
1576 | extern const char * const tile_register_names[]; | ||
1577 | |||
1578 | /* Descriptor for a special-purpose register. */ | ||
1579 | struct tile_spr | ||
1580 | { | ||
1581 | /* The number */ | ||
1582 | int number; | ||
1583 | |||
1584 | /* The name */ | ||
1585 | const char *name; | ||
1586 | }; | ||
1587 | |||
1588 | /* List of all the SPRs; ordered by increasing number. */ | ||
1589 | extern const struct tile_spr tile_sprs[]; | ||
1590 | |||
1591 | /* Number of special-purpose registers. */ | ||
1592 | extern const int tile_num_sprs; | ||
1593 | |||
1594 | extern const char * | ||
1595 | get_tile_spr_name (int num); | ||
1596 | |||
1597 | #endif /* opcode_tile_h */ | ||
diff --git a/arch/tile/include/asm/opcode-tile_64.h b/arch/tile/include/asm/opcode-tile_64.h new file mode 100644 index 000000000000..90f8dd372531 --- /dev/null +++ b/arch/tile/include/asm/opcode-tile_64.h | |||
@@ -0,0 +1,1597 @@ | |||
1 | /* tile.h -- Header file for TILE opcode table | ||
2 | Copyright (C) 2005 Free Software Foundation, Inc. | ||
3 | Contributed by Tilera Corp. */ | ||
4 | |||
5 | #ifndef opcode_tile_h | ||
6 | #define opcode_tile_h | ||
7 | |||
8 | typedef unsigned long long tile_bundle_bits; | ||
9 | |||
10 | |||
11 | enum | ||
12 | { | ||
13 | TILE_MAX_OPERANDS = 5 /* mm */ | ||
14 | }; | ||
15 | |||
16 | typedef enum | ||
17 | { | ||
18 | TILE_OPC_BPT, | ||
19 | TILE_OPC_INFO, | ||
20 | TILE_OPC_INFOL, | ||
21 | TILE_OPC_J, | ||
22 | TILE_OPC_JAL, | ||
23 | TILE_OPC_MOVE, | ||
24 | TILE_OPC_MOVE_SN, | ||
25 | TILE_OPC_MOVEI, | ||
26 | TILE_OPC_MOVEI_SN, | ||
27 | TILE_OPC_MOVELI, | ||
28 | TILE_OPC_MOVELI_SN, | ||
29 | TILE_OPC_MOVELIS, | ||
30 | TILE_OPC_PREFETCH, | ||
31 | TILE_OPC_ADD, | ||
32 | TILE_OPC_ADD_SN, | ||
33 | TILE_OPC_ADDB, | ||
34 | TILE_OPC_ADDB_SN, | ||
35 | TILE_OPC_ADDBS_U, | ||
36 | TILE_OPC_ADDBS_U_SN, | ||
37 | TILE_OPC_ADDH, | ||
38 | TILE_OPC_ADDH_SN, | ||
39 | TILE_OPC_ADDHS, | ||
40 | TILE_OPC_ADDHS_SN, | ||
41 | TILE_OPC_ADDI, | ||
42 | TILE_OPC_ADDI_SN, | ||
43 | TILE_OPC_ADDIB, | ||
44 | TILE_OPC_ADDIB_SN, | ||
45 | TILE_OPC_ADDIH, | ||
46 | TILE_OPC_ADDIH_SN, | ||
47 | TILE_OPC_ADDLI, | ||
48 | TILE_OPC_ADDLI_SN, | ||
49 | TILE_OPC_ADDLIS, | ||
50 | TILE_OPC_ADDS, | ||
51 | TILE_OPC_ADDS_SN, | ||
52 | TILE_OPC_ADIFFB_U, | ||
53 | TILE_OPC_ADIFFB_U_SN, | ||
54 | TILE_OPC_ADIFFH, | ||
55 | TILE_OPC_ADIFFH_SN, | ||
56 | TILE_OPC_AND, | ||
57 | TILE_OPC_AND_SN, | ||
58 | TILE_OPC_ANDI, | ||
59 | TILE_OPC_ANDI_SN, | ||
60 | TILE_OPC_AULI, | ||
61 | TILE_OPC_AVGB_U, | ||
62 | TILE_OPC_AVGB_U_SN, | ||
63 | TILE_OPC_AVGH, | ||
64 | TILE_OPC_AVGH_SN, | ||
65 | TILE_OPC_BBNS, | ||
66 | TILE_OPC_BBNS_SN, | ||
67 | TILE_OPC_BBNST, | ||
68 | TILE_OPC_BBNST_SN, | ||
69 | TILE_OPC_BBS, | ||
70 | TILE_OPC_BBS_SN, | ||
71 | TILE_OPC_BBST, | ||
72 | TILE_OPC_BBST_SN, | ||
73 | TILE_OPC_BGEZ, | ||
74 | TILE_OPC_BGEZ_SN, | ||
75 | TILE_OPC_BGEZT, | ||
76 | TILE_OPC_BGEZT_SN, | ||
77 | TILE_OPC_BGZ, | ||
78 | TILE_OPC_BGZ_SN, | ||
79 | TILE_OPC_BGZT, | ||
80 | TILE_OPC_BGZT_SN, | ||
81 | TILE_OPC_BITX, | ||
82 | TILE_OPC_BITX_SN, | ||
83 | TILE_OPC_BLEZ, | ||
84 | TILE_OPC_BLEZ_SN, | ||
85 | TILE_OPC_BLEZT, | ||
86 | TILE_OPC_BLEZT_SN, | ||
87 | TILE_OPC_BLZ, | ||
88 | TILE_OPC_BLZ_SN, | ||
89 | TILE_OPC_BLZT, | ||
90 | TILE_OPC_BLZT_SN, | ||
91 | TILE_OPC_BNZ, | ||
92 | TILE_OPC_BNZ_SN, | ||
93 | TILE_OPC_BNZT, | ||
94 | TILE_OPC_BNZT_SN, | ||
95 | TILE_OPC_BYTEX, | ||
96 | TILE_OPC_BYTEX_SN, | ||
97 | TILE_OPC_BZ, | ||
98 | TILE_OPC_BZ_SN, | ||
99 | TILE_OPC_BZT, | ||
100 | TILE_OPC_BZT_SN, | ||
101 | TILE_OPC_CLZ, | ||
102 | TILE_OPC_CLZ_SN, | ||
103 | TILE_OPC_CRC32_32, | ||
104 | TILE_OPC_CRC32_32_SN, | ||
105 | TILE_OPC_CRC32_8, | ||
106 | TILE_OPC_CRC32_8_SN, | ||
107 | TILE_OPC_CTZ, | ||
108 | TILE_OPC_CTZ_SN, | ||
109 | TILE_OPC_DRAIN, | ||
110 | TILE_OPC_DTLBPR, | ||
111 | TILE_OPC_DWORD_ALIGN, | ||
112 | TILE_OPC_DWORD_ALIGN_SN, | ||
113 | TILE_OPC_FINV, | ||
114 | TILE_OPC_FLUSH, | ||
115 | TILE_OPC_FNOP, | ||
116 | TILE_OPC_ICOH, | ||
117 | TILE_OPC_ILL, | ||
118 | TILE_OPC_INTHB, | ||
119 | TILE_OPC_INTHB_SN, | ||
120 | TILE_OPC_INTHH, | ||
121 | TILE_OPC_INTHH_SN, | ||
122 | TILE_OPC_INTLB, | ||
123 | TILE_OPC_INTLB_SN, | ||
124 | TILE_OPC_INTLH, | ||
125 | TILE_OPC_INTLH_SN, | ||
126 | TILE_OPC_INV, | ||
127 | TILE_OPC_IRET, | ||
128 | TILE_OPC_JALB, | ||
129 | TILE_OPC_JALF, | ||
130 | TILE_OPC_JALR, | ||
131 | TILE_OPC_JALRP, | ||
132 | TILE_OPC_JB, | ||
133 | TILE_OPC_JF, | ||
134 | TILE_OPC_JR, | ||
135 | TILE_OPC_JRP, | ||
136 | TILE_OPC_LB, | ||
137 | TILE_OPC_LB_SN, | ||
138 | TILE_OPC_LB_U, | ||
139 | TILE_OPC_LB_U_SN, | ||
140 | TILE_OPC_LBADD, | ||
141 | TILE_OPC_LBADD_SN, | ||
142 | TILE_OPC_LBADD_U, | ||
143 | TILE_OPC_LBADD_U_SN, | ||
144 | TILE_OPC_LH, | ||
145 | TILE_OPC_LH_SN, | ||
146 | TILE_OPC_LH_U, | ||
147 | TILE_OPC_LH_U_SN, | ||
148 | TILE_OPC_LHADD, | ||
149 | TILE_OPC_LHADD_SN, | ||
150 | TILE_OPC_LHADD_U, | ||
151 | TILE_OPC_LHADD_U_SN, | ||
152 | TILE_OPC_LNK, | ||
153 | TILE_OPC_LNK_SN, | ||
154 | TILE_OPC_LW, | ||
155 | TILE_OPC_LW_SN, | ||
156 | TILE_OPC_LW_NA, | ||
157 | TILE_OPC_LW_NA_SN, | ||
158 | TILE_OPC_LWADD, | ||
159 | TILE_OPC_LWADD_SN, | ||
160 | TILE_OPC_LWADD_NA, | ||
161 | TILE_OPC_LWADD_NA_SN, | ||
162 | TILE_OPC_MAXB_U, | ||
163 | TILE_OPC_MAXB_U_SN, | ||
164 | TILE_OPC_MAXH, | ||
165 | TILE_OPC_MAXH_SN, | ||
166 | TILE_OPC_MAXIB_U, | ||
167 | TILE_OPC_MAXIB_U_SN, | ||
168 | TILE_OPC_MAXIH, | ||
169 | TILE_OPC_MAXIH_SN, | ||
170 | TILE_OPC_MF, | ||
171 | TILE_OPC_MFSPR, | ||
172 | TILE_OPC_MINB_U, | ||
173 | TILE_OPC_MINB_U_SN, | ||
174 | TILE_OPC_MINH, | ||
175 | TILE_OPC_MINH_SN, | ||
176 | TILE_OPC_MINIB_U, | ||
177 | TILE_OPC_MINIB_U_SN, | ||
178 | TILE_OPC_MINIH, | ||
179 | TILE_OPC_MINIH_SN, | ||
180 | TILE_OPC_MM, | ||
181 | TILE_OPC_MNZ, | ||
182 | TILE_OPC_MNZ_SN, | ||
183 | TILE_OPC_MNZB, | ||
184 | TILE_OPC_MNZB_SN, | ||
185 | TILE_OPC_MNZH, | ||
186 | TILE_OPC_MNZH_SN, | ||
187 | TILE_OPC_MTSPR, | ||
188 | TILE_OPC_MULHH_SS, | ||
189 | TILE_OPC_MULHH_SS_SN, | ||
190 | TILE_OPC_MULHH_SU, | ||
191 | TILE_OPC_MULHH_SU_SN, | ||
192 | TILE_OPC_MULHH_UU, | ||
193 | TILE_OPC_MULHH_UU_SN, | ||
194 | TILE_OPC_MULHHA_SS, | ||
195 | TILE_OPC_MULHHA_SS_SN, | ||
196 | TILE_OPC_MULHHA_SU, | ||
197 | TILE_OPC_MULHHA_SU_SN, | ||
198 | TILE_OPC_MULHHA_UU, | ||
199 | TILE_OPC_MULHHA_UU_SN, | ||
200 | TILE_OPC_MULHHSA_UU, | ||
201 | TILE_OPC_MULHHSA_UU_SN, | ||
202 | TILE_OPC_MULHL_SS, | ||
203 | TILE_OPC_MULHL_SS_SN, | ||
204 | TILE_OPC_MULHL_SU, | ||
205 | TILE_OPC_MULHL_SU_SN, | ||
206 | TILE_OPC_MULHL_US, | ||
207 | TILE_OPC_MULHL_US_SN, | ||
208 | TILE_OPC_MULHL_UU, | ||
209 | TILE_OPC_MULHL_UU_SN, | ||
210 | TILE_OPC_MULHLA_SS, | ||
211 | TILE_OPC_MULHLA_SS_SN, | ||
212 | TILE_OPC_MULHLA_SU, | ||
213 | TILE_OPC_MULHLA_SU_SN, | ||
214 | TILE_OPC_MULHLA_US, | ||
215 | TILE_OPC_MULHLA_US_SN, | ||
216 | TILE_OPC_MULHLA_UU, | ||
217 | TILE_OPC_MULHLA_UU_SN, | ||
218 | TILE_OPC_MULHLSA_UU, | ||
219 | TILE_OPC_MULHLSA_UU_SN, | ||
220 | TILE_OPC_MULLL_SS, | ||
221 | TILE_OPC_MULLL_SS_SN, | ||
222 | TILE_OPC_MULLL_SU, | ||
223 | TILE_OPC_MULLL_SU_SN, | ||
224 | TILE_OPC_MULLL_UU, | ||
225 | TILE_OPC_MULLL_UU_SN, | ||
226 | TILE_OPC_MULLLA_SS, | ||
227 | TILE_OPC_MULLLA_SS_SN, | ||
228 | TILE_OPC_MULLLA_SU, | ||
229 | TILE_OPC_MULLLA_SU_SN, | ||
230 | TILE_OPC_MULLLA_UU, | ||
231 | TILE_OPC_MULLLA_UU_SN, | ||
232 | TILE_OPC_MULLLSA_UU, | ||
233 | TILE_OPC_MULLLSA_UU_SN, | ||
234 | TILE_OPC_MVNZ, | ||
235 | TILE_OPC_MVNZ_SN, | ||
236 | TILE_OPC_MVZ, | ||
237 | TILE_OPC_MVZ_SN, | ||
238 | TILE_OPC_MZ, | ||
239 | TILE_OPC_MZ_SN, | ||
240 | TILE_OPC_MZB, | ||
241 | TILE_OPC_MZB_SN, | ||
242 | TILE_OPC_MZH, | ||
243 | TILE_OPC_MZH_SN, | ||
244 | TILE_OPC_NAP, | ||
245 | TILE_OPC_NOP, | ||
246 | TILE_OPC_NOR, | ||
247 | TILE_OPC_NOR_SN, | ||
248 | TILE_OPC_OR, | ||
249 | TILE_OPC_OR_SN, | ||
250 | TILE_OPC_ORI, | ||
251 | TILE_OPC_ORI_SN, | ||
252 | TILE_OPC_PACKBS_U, | ||
253 | TILE_OPC_PACKBS_U_SN, | ||
254 | TILE_OPC_PACKHB, | ||
255 | TILE_OPC_PACKHB_SN, | ||
256 | TILE_OPC_PACKHS, | ||
257 | TILE_OPC_PACKHS_SN, | ||
258 | TILE_OPC_PACKLB, | ||
259 | TILE_OPC_PACKLB_SN, | ||
260 | TILE_OPC_PCNT, | ||
261 | TILE_OPC_PCNT_SN, | ||
262 | TILE_OPC_RL, | ||
263 | TILE_OPC_RL_SN, | ||
264 | TILE_OPC_RLI, | ||
265 | TILE_OPC_RLI_SN, | ||
266 | TILE_OPC_S1A, | ||
267 | TILE_OPC_S1A_SN, | ||
268 | TILE_OPC_S2A, | ||
269 | TILE_OPC_S2A_SN, | ||
270 | TILE_OPC_S3A, | ||
271 | TILE_OPC_S3A_SN, | ||
272 | TILE_OPC_SADAB_U, | ||
273 | TILE_OPC_SADAB_U_SN, | ||
274 | TILE_OPC_SADAH, | ||
275 | TILE_OPC_SADAH_SN, | ||
276 | TILE_OPC_SADAH_U, | ||
277 | TILE_OPC_SADAH_U_SN, | ||
278 | TILE_OPC_SADB_U, | ||
279 | TILE_OPC_SADB_U_SN, | ||
280 | TILE_OPC_SADH, | ||
281 | TILE_OPC_SADH_SN, | ||
282 | TILE_OPC_SADH_U, | ||
283 | TILE_OPC_SADH_U_SN, | ||
284 | TILE_OPC_SB, | ||
285 | TILE_OPC_SBADD, | ||
286 | TILE_OPC_SEQ, | ||
287 | TILE_OPC_SEQ_SN, | ||
288 | TILE_OPC_SEQB, | ||
289 | TILE_OPC_SEQB_SN, | ||
290 | TILE_OPC_SEQH, | ||
291 | TILE_OPC_SEQH_SN, | ||
292 | TILE_OPC_SEQI, | ||
293 | TILE_OPC_SEQI_SN, | ||
294 | TILE_OPC_SEQIB, | ||
295 | TILE_OPC_SEQIB_SN, | ||
296 | TILE_OPC_SEQIH, | ||
297 | TILE_OPC_SEQIH_SN, | ||
298 | TILE_OPC_SH, | ||
299 | TILE_OPC_SHADD, | ||
300 | TILE_OPC_SHL, | ||
301 | TILE_OPC_SHL_SN, | ||
302 | TILE_OPC_SHLB, | ||
303 | TILE_OPC_SHLB_SN, | ||
304 | TILE_OPC_SHLH, | ||
305 | TILE_OPC_SHLH_SN, | ||
306 | TILE_OPC_SHLI, | ||
307 | TILE_OPC_SHLI_SN, | ||
308 | TILE_OPC_SHLIB, | ||
309 | TILE_OPC_SHLIB_SN, | ||
310 | TILE_OPC_SHLIH, | ||
311 | TILE_OPC_SHLIH_SN, | ||
312 | TILE_OPC_SHR, | ||
313 | TILE_OPC_SHR_SN, | ||
314 | TILE_OPC_SHRB, | ||
315 | TILE_OPC_SHRB_SN, | ||
316 | TILE_OPC_SHRH, | ||
317 | TILE_OPC_SHRH_SN, | ||
318 | TILE_OPC_SHRI, | ||
319 | TILE_OPC_SHRI_SN, | ||
320 | TILE_OPC_SHRIB, | ||
321 | TILE_OPC_SHRIB_SN, | ||
322 | TILE_OPC_SHRIH, | ||
323 | TILE_OPC_SHRIH_SN, | ||
324 | TILE_OPC_SLT, | ||
325 | TILE_OPC_SLT_SN, | ||
326 | TILE_OPC_SLT_U, | ||
327 | TILE_OPC_SLT_U_SN, | ||
328 | TILE_OPC_SLTB, | ||
329 | TILE_OPC_SLTB_SN, | ||
330 | TILE_OPC_SLTB_U, | ||
331 | TILE_OPC_SLTB_U_SN, | ||
332 | TILE_OPC_SLTE, | ||
333 | TILE_OPC_SLTE_SN, | ||
334 | TILE_OPC_SLTE_U, | ||
335 | TILE_OPC_SLTE_U_SN, | ||
336 | TILE_OPC_SLTEB, | ||
337 | TILE_OPC_SLTEB_SN, | ||
338 | TILE_OPC_SLTEB_U, | ||
339 | TILE_OPC_SLTEB_U_SN, | ||
340 | TILE_OPC_SLTEH, | ||
341 | TILE_OPC_SLTEH_SN, | ||
342 | TILE_OPC_SLTEH_U, | ||
343 | TILE_OPC_SLTEH_U_SN, | ||
344 | TILE_OPC_SLTH, | ||
345 | TILE_OPC_SLTH_SN, | ||
346 | TILE_OPC_SLTH_U, | ||
347 | TILE_OPC_SLTH_U_SN, | ||
348 | TILE_OPC_SLTI, | ||
349 | TILE_OPC_SLTI_SN, | ||
350 | TILE_OPC_SLTI_U, | ||
351 | TILE_OPC_SLTI_U_SN, | ||
352 | TILE_OPC_SLTIB, | ||
353 | TILE_OPC_SLTIB_SN, | ||
354 | TILE_OPC_SLTIB_U, | ||
355 | TILE_OPC_SLTIB_U_SN, | ||
356 | TILE_OPC_SLTIH, | ||
357 | TILE_OPC_SLTIH_SN, | ||
358 | TILE_OPC_SLTIH_U, | ||
359 | TILE_OPC_SLTIH_U_SN, | ||
360 | TILE_OPC_SNE, | ||
361 | TILE_OPC_SNE_SN, | ||
362 | TILE_OPC_SNEB, | ||
363 | TILE_OPC_SNEB_SN, | ||
364 | TILE_OPC_SNEH, | ||
365 | TILE_OPC_SNEH_SN, | ||
366 | TILE_OPC_SRA, | ||
367 | TILE_OPC_SRA_SN, | ||
368 | TILE_OPC_SRAB, | ||
369 | TILE_OPC_SRAB_SN, | ||
370 | TILE_OPC_SRAH, | ||
371 | TILE_OPC_SRAH_SN, | ||
372 | TILE_OPC_SRAI, | ||
373 | TILE_OPC_SRAI_SN, | ||
374 | TILE_OPC_SRAIB, | ||
375 | TILE_OPC_SRAIB_SN, | ||
376 | TILE_OPC_SRAIH, | ||
377 | TILE_OPC_SRAIH_SN, | ||
378 | TILE_OPC_SUB, | ||
379 | TILE_OPC_SUB_SN, | ||
380 | TILE_OPC_SUBB, | ||
381 | TILE_OPC_SUBB_SN, | ||
382 | TILE_OPC_SUBBS_U, | ||
383 | TILE_OPC_SUBBS_U_SN, | ||
384 | TILE_OPC_SUBH, | ||
385 | TILE_OPC_SUBH_SN, | ||
386 | TILE_OPC_SUBHS, | ||
387 | TILE_OPC_SUBHS_SN, | ||
388 | TILE_OPC_SUBS, | ||
389 | TILE_OPC_SUBS_SN, | ||
390 | TILE_OPC_SW, | ||
391 | TILE_OPC_SWADD, | ||
392 | TILE_OPC_SWINT0, | ||
393 | TILE_OPC_SWINT1, | ||
394 | TILE_OPC_SWINT2, | ||
395 | TILE_OPC_SWINT3, | ||
396 | TILE_OPC_TBLIDXB0, | ||
397 | TILE_OPC_TBLIDXB0_SN, | ||
398 | TILE_OPC_TBLIDXB1, | ||
399 | TILE_OPC_TBLIDXB1_SN, | ||
400 | TILE_OPC_TBLIDXB2, | ||
401 | TILE_OPC_TBLIDXB2_SN, | ||
402 | TILE_OPC_TBLIDXB3, | ||
403 | TILE_OPC_TBLIDXB3_SN, | ||
404 | TILE_OPC_TNS, | ||
405 | TILE_OPC_TNS_SN, | ||
406 | TILE_OPC_WH64, | ||
407 | TILE_OPC_XOR, | ||
408 | TILE_OPC_XOR_SN, | ||
409 | TILE_OPC_XORI, | ||
410 | TILE_OPC_XORI_SN, | ||
411 | TILE_OPC_NONE | ||
412 | } tile_mnemonic; | ||
413 | |||
414 | /* 64-bit pattern for a { bpt ; nop } bundle. */ | ||
415 | #define TILE_BPT_BUNDLE 0x400b3cae70166000ULL | ||
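A hedged sketch (not part of this header): the bundle pattern above is presumably what gets written over an instruction to plant a software breakpoint; the helper name and the assumption that the caller handles permissions and cache maintenance are inventions of this example.

/* Example only: overwrite a bundle-aligned instruction with { bpt ; nop }.
 * Assumes the caller deals with write permission and icache flushing. */
static __inline void
example_plant_bpt(tile_bundle_bits *insn)
{
  *insn = TILE_BPT_BUNDLE;
}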
416 | |||
417 | |||
418 | #define TILE_ELF_MACHINE_CODE EM_TILEPRO | ||
419 | |||
420 | #define TILE_ELF_NAME "elf32-tilepro" | ||
421 | |||
422 | enum | ||
423 | { | ||
424 | TILE_SN_MAX_OPERANDS = 6 /* route */ | ||
425 | }; | ||
426 | |||
427 | typedef enum | ||
428 | { | ||
429 | TILE_SN_OPC_BZ, | ||
430 | TILE_SN_OPC_BNZ, | ||
431 | TILE_SN_OPC_JRR, | ||
432 | TILE_SN_OPC_FNOP, | ||
433 | TILE_SN_OPC_BLZ, | ||
434 | TILE_SN_OPC_NOP, | ||
435 | TILE_SN_OPC_MOVEI, | ||
436 | TILE_SN_OPC_MOVE, | ||
437 | TILE_SN_OPC_BGEZ, | ||
438 | TILE_SN_OPC_JR, | ||
439 | TILE_SN_OPC_BLEZ, | ||
440 | TILE_SN_OPC_BBNS, | ||
441 | TILE_SN_OPC_JALRR, | ||
442 | TILE_SN_OPC_BPT, | ||
443 | TILE_SN_OPC_JALR, | ||
444 | TILE_SN_OPC_SHR1, | ||
445 | TILE_SN_OPC_BGZ, | ||
446 | TILE_SN_OPC_BBS, | ||
447 | TILE_SN_OPC_SHL8II, | ||
448 | TILE_SN_OPC_ADDI, | ||
449 | TILE_SN_OPC_HALT, | ||
450 | TILE_SN_OPC_ROUTE, | ||
451 | TILE_SN_OPC_NONE | ||
452 | } tile_sn_mnemonic; | ||
453 | |||
454 | extern const unsigned char tile_sn_route_encode[6 * 6 * 6]; | ||
455 | extern const signed char tile_sn_route_decode[256][3]; | ||
456 | extern const char tile_sn_direction_names[6][5]; | ||
457 | extern const signed char tile_sn_dest_map[6][6]; | ||
458 | |||
459 | |||
460 | static __inline unsigned int | ||
461 | get_BrOff_SN(tile_bundle_bits num) | ||
462 | { | ||
463 | const unsigned int n = (unsigned int)num; | ||
464 | return (((n >> 0)) & 0x3ff); | ||
465 | } | ||
466 | |||
467 | static __inline unsigned int | ||
468 | get_BrOff_X1(tile_bundle_bits n) | ||
469 | { | ||
470 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
471 | (((unsigned int)(n >> 20)) & 0x00018000); | ||
472 | } | ||
473 | |||
474 | static __inline unsigned int | ||
475 | get_BrType_X1(tile_bundle_bits n) | ||
476 | { | ||
477 | return (((unsigned int)(n >> 31)) & 0xf); | ||
478 | } | ||
479 | |||
480 | static __inline unsigned int | ||
481 | get_Dest_Imm8_X1(tile_bundle_bits n) | ||
482 | { | ||
483 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
484 | (((unsigned int)(n >> 43)) & 0x000000c0); | ||
485 | } | ||
486 | |||
487 | static __inline unsigned int | ||
488 | get_Dest_SN(tile_bundle_bits num) | ||
489 | { | ||
490 | const unsigned int n = (unsigned int)num; | ||
491 | return (((n >> 2)) & 0x3); | ||
492 | } | ||
493 | |||
494 | static __inline unsigned int | ||
495 | get_Dest_X0(tile_bundle_bits num) | ||
496 | { | ||
497 | const unsigned int n = (unsigned int)num; | ||
498 | return (((n >> 0)) & 0x3f); | ||
499 | } | ||
500 | |||
501 | static __inline unsigned int | ||
502 | get_Dest_X1(tile_bundle_bits n) | ||
503 | { | ||
504 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
505 | } | ||
506 | |||
507 | static __inline unsigned int | ||
508 | get_Dest_Y0(tile_bundle_bits num) | ||
509 | { | ||
510 | const unsigned int n = (unsigned int)num; | ||
511 | return (((n >> 0)) & 0x3f); | ||
512 | } | ||
513 | |||
514 | static __inline unsigned int | ||
515 | get_Dest_Y1(tile_bundle_bits n) | ||
516 | { | ||
517 | return (((unsigned int)(n >> 31)) & 0x3f); | ||
518 | } | ||
519 | |||
520 | static __inline unsigned int | ||
521 | get_Imm16_X0(tile_bundle_bits num) | ||
522 | { | ||
523 | const unsigned int n = (unsigned int)num; | ||
524 | return (((n >> 12)) & 0xffff); | ||
525 | } | ||
526 | |||
527 | static __inline unsigned int | ||
528 | get_Imm16_X1(tile_bundle_bits n) | ||
529 | { | ||
530 | return (((unsigned int)(n >> 43)) & 0xffff); | ||
531 | } | ||
532 | |||
533 | static __inline unsigned int | ||
534 | get_Imm8_SN(tile_bundle_bits num) | ||
535 | { | ||
536 | const unsigned int n = (unsigned int)num; | ||
537 | return (((n >> 0)) & 0xff); | ||
538 | } | ||
539 | |||
540 | static __inline unsigned int | ||
541 | get_Imm8_X0(tile_bundle_bits num) | ||
542 | { | ||
543 | const unsigned int n = (unsigned int)num; | ||
544 | return (((n >> 12)) & 0xff); | ||
545 | } | ||
546 | |||
547 | static __inline unsigned int | ||
548 | get_Imm8_X1(tile_bundle_bits n) | ||
549 | { | ||
550 | return (((unsigned int)(n >> 43)) & 0xff); | ||
551 | } | ||
552 | |||
553 | static __inline unsigned int | ||
554 | get_Imm8_Y0(tile_bundle_bits num) | ||
555 | { | ||
556 | const unsigned int n = (unsigned int)num; | ||
557 | return (((n >> 12)) & 0xff); | ||
558 | } | ||
559 | |||
560 | static __inline unsigned int | ||
561 | get_Imm8_Y1(tile_bundle_bits n) | ||
562 | { | ||
563 | return (((unsigned int)(n >> 43)) & 0xff); | ||
564 | } | ||
565 | |||
566 | static __inline unsigned int | ||
567 | get_ImmOpcodeExtension_X0(tile_bundle_bits num) | ||
568 | { | ||
569 | const unsigned int n = (unsigned int)num; | ||
570 | return (((n >> 20)) & 0x7f); | ||
571 | } | ||
572 | |||
573 | static __inline unsigned int | ||
574 | get_ImmOpcodeExtension_X1(tile_bundle_bits n) | ||
575 | { | ||
576 | return (((unsigned int)(n >> 51)) & 0x7f); | ||
577 | } | ||
578 | |||
579 | static __inline unsigned int | ||
580 | get_ImmRROpcodeExtension_SN(tile_bundle_bits num) | ||
581 | { | ||
582 | const unsigned int n = (unsigned int)num; | ||
583 | return (((n >> 8)) & 0x3); | ||
584 | } | ||
585 | |||
586 | static __inline unsigned int | ||
587 | get_JOffLong_X1(tile_bundle_bits n) | ||
588 | { | ||
589 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
590 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
591 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
592 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
593 | (((unsigned int)(n >> 31)) & 0x18000000); | ||
594 | } | ||
595 | |||
596 | static __inline unsigned int | ||
597 | get_JOff_X1(tile_bundle_bits n) | ||
598 | { | ||
599 | return (((unsigned int)(n >> 43)) & 0x00007fff) | | ||
600 | (((unsigned int)(n >> 20)) & 0x00018000) | | ||
601 | (((unsigned int)(n >> 14)) & 0x001e0000) | | ||
602 | (((unsigned int)(n >> 16)) & 0x07e00000) | | ||
603 | (((unsigned int)(n >> 31)) & 0x08000000); | ||
604 | } | ||
605 | |||
606 | static __inline unsigned int | ||
607 | get_MF_Imm15_X1(tile_bundle_bits n) | ||
608 | { | ||
609 | return (((unsigned int)(n >> 37)) & 0x00003fff) | | ||
610 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
611 | } | ||
612 | |||
613 | static __inline unsigned int | ||
614 | get_MMEnd_X0(tile_bundle_bits num) | ||
615 | { | ||
616 | const unsigned int n = (unsigned int)num; | ||
617 | return (((n >> 18)) & 0x1f); | ||
618 | } | ||
619 | |||
620 | static __inline unsigned int | ||
621 | get_MMEnd_X1(tile_bundle_bits n) | ||
622 | { | ||
623 | return (((unsigned int)(n >> 49)) & 0x1f); | ||
624 | } | ||
625 | |||
626 | static __inline unsigned int | ||
627 | get_MMStart_X0(tile_bundle_bits num) | ||
628 | { | ||
629 | const unsigned int n = (unsigned int)num; | ||
630 | return (((n >> 23)) & 0x1f); | ||
631 | } | ||
632 | |||
633 | static __inline unsigned int | ||
634 | get_MMStart_X1(tile_bundle_bits n) | ||
635 | { | ||
636 | return (((unsigned int)(n >> 54)) & 0x1f); | ||
637 | } | ||
638 | |||
639 | static __inline unsigned int | ||
640 | get_MT_Imm15_X1(tile_bundle_bits n) | ||
641 | { | ||
642 | return (((unsigned int)(n >> 31)) & 0x0000003f) | | ||
643 | (((unsigned int)(n >> 37)) & 0x00003fc0) | | ||
644 | (((unsigned int)(n >> 44)) & 0x00004000); | ||
645 | } | ||
646 | |||
647 | static __inline unsigned int | ||
648 | get_Mode(tile_bundle_bits n) | ||
649 | { | ||
650 | return (((unsigned int)(n >> 63)) & 0x1); | ||
651 | } | ||
652 | |||
653 | static __inline unsigned int | ||
654 | get_NoRegOpcodeExtension_SN(tile_bundle_bits num) | ||
655 | { | ||
656 | const unsigned int n = (unsigned int)num; | ||
657 | return (((n >> 0)) & 0xf); | ||
658 | } | ||
659 | |||
660 | static __inline unsigned int | ||
661 | get_Opcode_SN(tile_bundle_bits num) | ||
662 | { | ||
663 | const unsigned int n = (unsigned int)num; | ||
664 | return (((n >> 10)) & 0x3f); | ||
665 | } | ||
666 | |||
667 | static __inline unsigned int | ||
668 | get_Opcode_X0(tile_bundle_bits num) | ||
669 | { | ||
670 | const unsigned int n = (unsigned int)num; | ||
671 | return (((n >> 28)) & 0x7); | ||
672 | } | ||
673 | |||
674 | static __inline unsigned int | ||
675 | get_Opcode_X1(tile_bundle_bits n) | ||
676 | { | ||
677 | return (((unsigned int)(n >> 59)) & 0xf); | ||
678 | } | ||
679 | |||
680 | static __inline unsigned int | ||
681 | get_Opcode_Y0(tile_bundle_bits num) | ||
682 | { | ||
683 | const unsigned int n = (unsigned int)num; | ||
684 | return (((n >> 27)) & 0xf); | ||
685 | } | ||
686 | |||
687 | static __inline unsigned int | ||
688 | get_Opcode_Y1(tile_bundle_bits n) | ||
689 | { | ||
690 | return (((unsigned int)(n >> 59)) & 0xf); | ||
691 | } | ||
692 | |||
693 | static __inline unsigned int | ||
694 | get_Opcode_Y2(tile_bundle_bits n) | ||
695 | { | ||
696 | return (((unsigned int)(n >> 56)) & 0x7); | ||
697 | } | ||
698 | |||
699 | static __inline unsigned int | ||
700 | get_RROpcodeExtension_SN(tile_bundle_bits num) | ||
701 | { | ||
702 | const unsigned int n = (unsigned int)num; | ||
703 | return (((n >> 4)) & 0xf); | ||
704 | } | ||
705 | |||
706 | static __inline unsigned int | ||
707 | get_RRROpcodeExtension_X0(tile_bundle_bits num) | ||
708 | { | ||
709 | const unsigned int n = (unsigned int)num; | ||
710 | return (((n >> 18)) & 0x1ff); | ||
711 | } | ||
712 | |||
713 | static __inline unsigned int | ||
714 | get_RRROpcodeExtension_X1(tile_bundle_bits n) | ||
715 | { | ||
716 | return (((unsigned int)(n >> 49)) & 0x1ff); | ||
717 | } | ||
718 | |||
719 | static __inline unsigned int | ||
720 | get_RRROpcodeExtension_Y0(tile_bundle_bits num) | ||
721 | { | ||
722 | const unsigned int n = (unsigned int)num; | ||
723 | return (((n >> 18)) & 0x3); | ||
724 | } | ||
725 | |||
726 | static __inline unsigned int | ||
727 | get_RRROpcodeExtension_Y1(tile_bundle_bits n) | ||
728 | { | ||
729 | return (((unsigned int)(n >> 49)) & 0x3); | ||
730 | } | ||
731 | |||
732 | static __inline unsigned int | ||
733 | get_RouteOpcodeExtension_SN(tile_bundle_bits num) | ||
734 | { | ||
735 | const unsigned int n = (unsigned int)num; | ||
736 | return (((n >> 0)) & 0x3ff); | ||
737 | } | ||
738 | |||
739 | static __inline unsigned int | ||
740 | get_S_X0(tile_bundle_bits num) | ||
741 | { | ||
742 | const unsigned int n = (unsigned int)num; | ||
743 | return (((n >> 27)) & 0x1); | ||
744 | } | ||
745 | |||
746 | static __inline unsigned int | ||
747 | get_S_X1(tile_bundle_bits n) | ||
748 | { | ||
749 | return (((unsigned int)(n >> 58)) & 0x1); | ||
750 | } | ||
751 | |||
752 | static __inline unsigned int | ||
753 | get_ShAmt_X0(tile_bundle_bits num) | ||
754 | { | ||
755 | const unsigned int n = (unsigned int)num; | ||
756 | return (((n >> 12)) & 0x1f); | ||
757 | } | ||
758 | |||
759 | static __inline unsigned int | ||
760 | get_ShAmt_X1(tile_bundle_bits n) | ||
761 | { | ||
762 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
763 | } | ||
764 | |||
765 | static __inline unsigned int | ||
766 | get_ShAmt_Y0(tile_bundle_bits num) | ||
767 | { | ||
768 | const unsigned int n = (unsigned int)num; | ||
769 | return (((n >> 12)) & 0x1f); | ||
770 | } | ||
771 | |||
772 | static __inline unsigned int | ||
773 | get_ShAmt_Y1(tile_bundle_bits n) | ||
774 | { | ||
775 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
776 | } | ||
777 | |||
778 | static __inline unsigned int | ||
779 | get_SrcA_X0(tile_bundle_bits num) | ||
780 | { | ||
781 | const unsigned int n = (unsigned int)num; | ||
782 | return (((n >> 6)) & 0x3f); | ||
783 | } | ||
784 | |||
785 | static __inline unsigned int | ||
786 | get_SrcA_X1(tile_bundle_bits n) | ||
787 | { | ||
788 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
789 | } | ||
790 | |||
791 | static __inline unsigned int | ||
792 | get_SrcA_Y0(tile_bundle_bits num) | ||
793 | { | ||
794 | const unsigned int n = (unsigned int)num; | ||
795 | return (((n >> 6)) & 0x3f); | ||
796 | } | ||
797 | |||
798 | static __inline unsigned int | ||
799 | get_SrcA_Y1(tile_bundle_bits n) | ||
800 | { | ||
801 | return (((unsigned int)(n >> 37)) & 0x3f); | ||
802 | } | ||
803 | |||
804 | static __inline unsigned int | ||
805 | get_SrcA_Y2(tile_bundle_bits n) | ||
806 | { | ||
807 | return (((n >> 26)) & 0x00000001) | | ||
808 | (((unsigned int)(n >> 50)) & 0x0000003e); | ||
809 | } | ||
810 | |||
811 | static __inline unsigned int | ||
812 | get_SrcBDest_Y2(tile_bundle_bits num) | ||
813 | { | ||
814 | const unsigned int n = (unsigned int)num; | ||
815 | return (((n >> 20)) & 0x3f); | ||
816 | } | ||
817 | |||
818 | static __inline unsigned int | ||
819 | get_SrcB_X0(tile_bundle_bits num) | ||
820 | { | ||
821 | const unsigned int n = (unsigned int)num; | ||
822 | return (((n >> 12)) & 0x3f); | ||
823 | } | ||
824 | |||
825 | static __inline unsigned int | ||
826 | get_SrcB_X1(tile_bundle_bits n) | ||
827 | { | ||
828 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
829 | } | ||
830 | |||
831 | static __inline unsigned int | ||
832 | get_SrcB_Y0(tile_bundle_bits num) | ||
833 | { | ||
834 | const unsigned int n = (unsigned int)num; | ||
835 | return (((n >> 12)) & 0x3f); | ||
836 | } | ||
837 | |||
838 | static __inline unsigned int | ||
839 | get_SrcB_Y1(tile_bundle_bits n) | ||
840 | { | ||
841 | return (((unsigned int)(n >> 43)) & 0x3f); | ||
842 | } | ||
843 | |||
844 | static __inline unsigned int | ||
845 | get_Src_SN(tile_bundle_bits num) | ||
846 | { | ||
847 | const unsigned int n = (unsigned int)num; | ||
848 | return (((n >> 0)) & 0x3); | ||
849 | } | ||
850 | |||
851 | static __inline unsigned int | ||
852 | get_UnOpcodeExtension_X0(tile_bundle_bits num) | ||
853 | { | ||
854 | const unsigned int n = (unsigned int)num; | ||
855 | return (((n >> 12)) & 0x1f); | ||
856 | } | ||
857 | |||
858 | static __inline unsigned int | ||
859 | get_UnOpcodeExtension_X1(tile_bundle_bits n) | ||
860 | { | ||
861 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
862 | } | ||
863 | |||
864 | static __inline unsigned int | ||
865 | get_UnOpcodeExtension_Y0(tile_bundle_bits num) | ||
866 | { | ||
867 | const unsigned int n = (unsigned int)num; | ||
868 | return (((n >> 12)) & 0x1f); | ||
869 | } | ||
870 | |||
871 | static __inline unsigned int | ||
872 | get_UnOpcodeExtension_Y1(tile_bundle_bits n) | ||
873 | { | ||
874 | return (((unsigned int)(n >> 43)) & 0x1f); | ||
875 | } | ||
876 | |||
877 | static __inline unsigned int | ||
878 | get_UnShOpcodeExtension_X0(tile_bundle_bits num) | ||
879 | { | ||
880 | const unsigned int n = (unsigned int)num; | ||
881 | return (((n >> 17)) & 0x3ff); | ||
882 | } | ||
883 | |||
884 | static __inline unsigned int | ||
885 | get_UnShOpcodeExtension_X1(tile_bundle_bits n) | ||
886 | { | ||
887 | return (((unsigned int)(n >> 48)) & 0x3ff); | ||
888 | } | ||
889 | |||
890 | static __inline unsigned int | ||
891 | get_UnShOpcodeExtension_Y0(tile_bundle_bits num) | ||
892 | { | ||
893 | const unsigned int n = (unsigned int)num; | ||
894 | return (((n >> 17)) & 0x7); | ||
895 | } | ||
896 | |||
897 | static __inline unsigned int | ||
898 | get_UnShOpcodeExtension_Y1(tile_bundle_bits n) | ||
899 | { | ||
900 | return (((unsigned int)(n >> 48)) & 0x7); | ||
901 | } | ||
902 | |||
903 | |||
904 | static __inline int | ||
905 | sign_extend(int n, int num_bits) | ||
906 | { | ||
907 | int shift = (int)(sizeof(int) * 8 - num_bits); | ||
908 | return (n << shift) >> shift; | ||
909 | } | ||
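A minimal usage sketch (the helper name is invented for this example): the get_* accessors above return zero-extended field values, so a signed immediate is recovered by pairing an accessor with sign_extend().

/* Example only: recover the signed 8-bit immediate of an X1 instruction. */
static __inline int
example_signed_imm8_x1(tile_bundle_bits bundle)
{
  return sign_extend((int)get_Imm8_X1(bundle), 8);
}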
910 | |||
911 | |||
912 | |||
913 | static __inline tile_bundle_bits | ||
914 | create_BrOff_SN(int num) | ||
915 | { | ||
916 | const unsigned int n = (unsigned int)num; | ||
917 | return ((n & 0x3ff) << 0); | ||
918 | } | ||
919 | |||
920 | static __inline tile_bundle_bits | ||
921 | create_BrOff_X1(int num) | ||
922 | { | ||
923 | const unsigned int n = (unsigned int)num; | ||
924 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
925 | (((tile_bundle_bits)(n & 0x00018000)) << 20); | ||
926 | } | ||
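Illustrative round trip (helper invented for this sketch): each create_* helper mirrors the matching get_* accessor, so encoding and then extracting the 17-bit X1 branch offset returns the value masked to that width.

/* Example only: nonzero if the 17-bit branch offset survives a
 * create/get round trip. */
static __inline int
example_broff_x1_roundtrip(unsigned int off)
{
  return get_BrOff_X1(create_BrOff_X1((int)off)) == (off & 0x1ffff);
}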
927 | |||
928 | static __inline tile_bundle_bits | ||
929 | create_BrType_X1(int num) | ||
930 | { | ||
931 | const unsigned int n = (unsigned int)num; | ||
932 | return (((tile_bundle_bits)(n & 0xf)) << 31); | ||
933 | } | ||
934 | |||
935 | static __inline tile_bundle_bits | ||
936 | create_Dest_Imm8_X1(int num) | ||
937 | { | ||
938 | const unsigned int n = (unsigned int)num; | ||
939 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
940 | (((tile_bundle_bits)(n & 0x000000c0)) << 43); | ||
941 | } | ||
942 | |||
943 | static __inline tile_bundle_bits | ||
944 | create_Dest_SN(int num) | ||
945 | { | ||
946 | const unsigned int n = (unsigned int)num; | ||
947 | return ((n & 0x3) << 2); | ||
948 | } | ||
949 | |||
950 | static __inline tile_bundle_bits | ||
951 | create_Dest_X0(int num) | ||
952 | { | ||
953 | const unsigned int n = (unsigned int)num; | ||
954 | return ((n & 0x3f) << 0); | ||
955 | } | ||
956 | |||
957 | static __inline tile_bundle_bits | ||
958 | create_Dest_X1(int num) | ||
959 | { | ||
960 | const unsigned int n = (unsigned int)num; | ||
961 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
962 | } | ||
963 | |||
964 | static __inline tile_bundle_bits | ||
965 | create_Dest_Y0(int num) | ||
966 | { | ||
967 | const unsigned int n = (unsigned int)num; | ||
968 | return ((n & 0x3f) << 0); | ||
969 | } | ||
970 | |||
971 | static __inline tile_bundle_bits | ||
972 | create_Dest_Y1(int num) | ||
973 | { | ||
974 | const unsigned int n = (unsigned int)num; | ||
975 | return (((tile_bundle_bits)(n & 0x3f)) << 31); | ||
976 | } | ||
977 | |||
978 | static __inline tile_bundle_bits | ||
979 | create_Imm16_X0(int num) | ||
980 | { | ||
981 | const unsigned int n = (unsigned int)num; | ||
982 | return ((n & 0xffff) << 12); | ||
983 | } | ||
984 | |||
985 | static __inline tile_bundle_bits | ||
986 | create_Imm16_X1(int num) | ||
987 | { | ||
988 | const unsigned int n = (unsigned int)num; | ||
989 | return (((tile_bundle_bits)(n & 0xffff)) << 43); | ||
990 | } | ||
991 | |||
992 | static __inline tile_bundle_bits | ||
993 | create_Imm8_SN(int num) | ||
994 | { | ||
995 | const unsigned int n = (unsigned int)num; | ||
996 | return ((n & 0xff) << 0); | ||
997 | } | ||
998 | |||
999 | static __inline tile_bundle_bits | ||
1000 | create_Imm8_X0(int num) | ||
1001 | { | ||
1002 | const unsigned int n = (unsigned int)num; | ||
1003 | return ((n & 0xff) << 12); | ||
1004 | } | ||
1005 | |||
1006 | static __inline tile_bundle_bits | ||
1007 | create_Imm8_X1(int num) | ||
1008 | { | ||
1009 | const unsigned int n = (unsigned int)num; | ||
1010 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
1011 | } | ||
1012 | |||
1013 | static __inline tile_bundle_bits | ||
1014 | create_Imm8_Y0(int num) | ||
1015 | { | ||
1016 | const unsigned int n = (unsigned int)num; | ||
1017 | return ((n & 0xff) << 12); | ||
1018 | } | ||
1019 | |||
1020 | static __inline tile_bundle_bits | ||
1021 | create_Imm8_Y1(int num) | ||
1022 | { | ||
1023 | const unsigned int n = (unsigned int)num; | ||
1024 | return (((tile_bundle_bits)(n & 0xff)) << 43); | ||
1025 | } | ||
1026 | |||
1027 | static __inline tile_bundle_bits | ||
1028 | create_ImmOpcodeExtension_X0(int num) | ||
1029 | { | ||
1030 | const unsigned int n = (unsigned int)num; | ||
1031 | return ((n & 0x7f) << 20); | ||
1032 | } | ||
1033 | |||
1034 | static __inline tile_bundle_bits | ||
1035 | create_ImmOpcodeExtension_X1(int num) | ||
1036 | { | ||
1037 | const unsigned int n = (unsigned int)num; | ||
1038 | return (((tile_bundle_bits)(n & 0x7f)) << 51); | ||
1039 | } | ||
1040 | |||
1041 | static __inline tile_bundle_bits | ||
1042 | create_ImmRROpcodeExtension_SN(int num) | ||
1043 | { | ||
1044 | const unsigned int n = (unsigned int)num; | ||
1045 | return ((n & 0x3) << 8); | ||
1046 | } | ||
1047 | |||
1048 | static __inline tile_bundle_bits | ||
1049 | create_JOffLong_X1(int num) | ||
1050 | { | ||
1051 | const unsigned int n = (unsigned int)num; | ||
1052 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1053 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1054 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1055 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1056 | (((tile_bundle_bits)(n & 0x18000000)) << 31); | ||
1057 | } | ||
1058 | |||
1059 | static __inline tile_bundle_bits | ||
1060 | create_JOff_X1(int num) | ||
1061 | { | ||
1062 | const unsigned int n = (unsigned int)num; | ||
1063 | return (((tile_bundle_bits)(n & 0x00007fff)) << 43) | | ||
1064 | (((tile_bundle_bits)(n & 0x00018000)) << 20) | | ||
1065 | (((tile_bundle_bits)(n & 0x001e0000)) << 14) | | ||
1066 | (((tile_bundle_bits)(n & 0x07e00000)) << 16) | | ||
1067 | (((tile_bundle_bits)(n & 0x08000000)) << 31); | ||
1068 | } | ||
1069 | |||
1070 | static __inline tile_bundle_bits | ||
1071 | create_MF_Imm15_X1(int num) | ||
1072 | { | ||
1073 | const unsigned int n = (unsigned int)num; | ||
1074 | return (((tile_bundle_bits)(n & 0x00003fff)) << 37) | | ||
1075 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1076 | } | ||
1077 | |||
1078 | static __inline tile_bundle_bits | ||
1079 | create_MMEnd_X0(int num) | ||
1080 | { | ||
1081 | const unsigned int n = (unsigned int)num; | ||
1082 | return ((n & 0x1f) << 18); | ||
1083 | } | ||
1084 | |||
1085 | static __inline tile_bundle_bits | ||
1086 | create_MMEnd_X1(int num) | ||
1087 | { | ||
1088 | const unsigned int n = (unsigned int)num; | ||
1089 | return (((tile_bundle_bits)(n & 0x1f)) << 49); | ||
1090 | } | ||
1091 | |||
1092 | static __inline tile_bundle_bits | ||
1093 | create_MMStart_X0(int num) | ||
1094 | { | ||
1095 | const unsigned int n = (unsigned int)num; | ||
1096 | return ((n & 0x1f) << 23); | ||
1097 | } | ||
1098 | |||
1099 | static __inline tile_bundle_bits | ||
1100 | create_MMStart_X1(int num) | ||
1101 | { | ||
1102 | const unsigned int n = (unsigned int)num; | ||
1103 | return (((tile_bundle_bits)(n & 0x1f)) << 54); | ||
1104 | } | ||
1105 | |||
1106 | static __inline tile_bundle_bits | ||
1107 | create_MT_Imm15_X1(int num) | ||
1108 | { | ||
1109 | const unsigned int n = (unsigned int)num; | ||
1110 | return (((tile_bundle_bits)(n & 0x0000003f)) << 31) | | ||
1111 | (((tile_bundle_bits)(n & 0x00003fc0)) << 37) | | ||
1112 | (((tile_bundle_bits)(n & 0x00004000)) << 44); | ||
1113 | } | ||
1114 | |||
1115 | static __inline tile_bundle_bits | ||
1116 | create_Mode(int num) | ||
1117 | { | ||
1118 | const unsigned int n = (unsigned int)num; | ||
1119 | return (((tile_bundle_bits)(n & 0x1)) << 63); | ||
1120 | } | ||
1121 | |||
1122 | static __inline tile_bundle_bits | ||
1123 | create_NoRegOpcodeExtension_SN(int num) | ||
1124 | { | ||
1125 | const unsigned int n = (unsigned int)num; | ||
1126 | return ((n & 0xf) << 0); | ||
1127 | } | ||
1128 | |||
1129 | static __inline tile_bundle_bits | ||
1130 | create_Opcode_SN(int num) | ||
1131 | { | ||
1132 | const unsigned int n = (unsigned int)num; | ||
1133 | return ((n & 0x3f) << 10); | ||
1134 | } | ||
1135 | |||
1136 | static __inline tile_bundle_bits | ||
1137 | create_Opcode_X0(int num) | ||
1138 | { | ||
1139 | const unsigned int n = (unsigned int)num; | ||
1140 | return ((n & 0x7) << 28); | ||
1141 | } | ||
1142 | |||
1143 | static __inline tile_bundle_bits | ||
1144 | create_Opcode_X1(int num) | ||
1145 | { | ||
1146 | const unsigned int n = (unsigned int)num; | ||
1147 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1148 | } | ||
1149 | |||
1150 | static __inline tile_bundle_bits | ||
1151 | create_Opcode_Y0(int num) | ||
1152 | { | ||
1153 | const unsigned int n = (unsigned int)num; | ||
1154 | return ((n & 0xf) << 27); | ||
1155 | } | ||
1156 | |||
1157 | static __inline tile_bundle_bits | ||
1158 | create_Opcode_Y1(int num) | ||
1159 | { | ||
1160 | const unsigned int n = (unsigned int)num; | ||
1161 | return (((tile_bundle_bits)(n & 0xf)) << 59); | ||
1162 | } | ||
1163 | |||
1164 | static __inline tile_bundle_bits | ||
1165 | create_Opcode_Y2(int num) | ||
1166 | { | ||
1167 | const unsigned int n = (unsigned int)num; | ||
1168 | return (((tile_bundle_bits)(n & 0x7)) << 56); | ||
1169 | } | ||
1170 | |||
1171 | static __inline tile_bundle_bits | ||
1172 | create_RROpcodeExtension_SN(int num) | ||
1173 | { | ||
1174 | const unsigned int n = (unsigned int)num; | ||
1175 | return ((n & 0xf) << 4); | ||
1176 | } | ||
1177 | |||
1178 | static __inline tile_bundle_bits | ||
1179 | create_RRROpcodeExtension_X0(int num) | ||
1180 | { | ||
1181 | const unsigned int n = (unsigned int)num; | ||
1182 | return ((n & 0x1ff) << 18); | ||
1183 | } | ||
1184 | |||
1185 | static __inline tile_bundle_bits | ||
1186 | create_RRROpcodeExtension_X1(int num) | ||
1187 | { | ||
1188 | const unsigned int n = (unsigned int)num; | ||
1189 | return (((tile_bundle_bits)(n & 0x1ff)) << 49); | ||
1190 | } | ||
1191 | |||
1192 | static __inline tile_bundle_bits | ||
1193 | create_RRROpcodeExtension_Y0(int num) | ||
1194 | { | ||
1195 | const unsigned int n = (unsigned int)num; | ||
1196 | return ((n & 0x3) << 18); | ||
1197 | } | ||
1198 | |||
1199 | static __inline tile_bundle_bits | ||
1200 | create_RRROpcodeExtension_Y1(int num) | ||
1201 | { | ||
1202 | const unsigned int n = (unsigned int)num; | ||
1203 | return (((tile_bundle_bits)(n & 0x3)) << 49); | ||
1204 | } | ||
1205 | |||
1206 | static __inline tile_bundle_bits | ||
1207 | create_RouteOpcodeExtension_SN(int num) | ||
1208 | { | ||
1209 | const unsigned int n = (unsigned int)num; | ||
1210 | return ((n & 0x3ff) << 0); | ||
1211 | } | ||
1212 | |||
1213 | static __inline tile_bundle_bits | ||
1214 | create_S_X0(int num) | ||
1215 | { | ||
1216 | const unsigned int n = (unsigned int)num; | ||
1217 | return ((n & 0x1) << 27); | ||
1218 | } | ||
1219 | |||
1220 | static __inline tile_bundle_bits | ||
1221 | create_S_X1(int num) | ||
1222 | { | ||
1223 | const unsigned int n = (unsigned int)num; | ||
1224 | return (((tile_bundle_bits)(n & 0x1)) << 58); | ||
1225 | } | ||
1226 | |||
1227 | static __inline tile_bundle_bits | ||
1228 | create_ShAmt_X0(int num) | ||
1229 | { | ||
1230 | const unsigned int n = (unsigned int)num; | ||
1231 | return ((n & 0x1f) << 12); | ||
1232 | } | ||
1233 | |||
1234 | static __inline tile_bundle_bits | ||
1235 | create_ShAmt_X1(int num) | ||
1236 | { | ||
1237 | const unsigned int n = (unsigned int)num; | ||
1238 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1239 | } | ||
1240 | |||
1241 | static __inline tile_bundle_bits | ||
1242 | create_ShAmt_Y0(int num) | ||
1243 | { | ||
1244 | const unsigned int n = (unsigned int)num; | ||
1245 | return ((n & 0x1f) << 12); | ||
1246 | } | ||
1247 | |||
1248 | static __inline tile_bundle_bits | ||
1249 | create_ShAmt_Y1(int num) | ||
1250 | { | ||
1251 | const unsigned int n = (unsigned int)num; | ||
1252 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1253 | } | ||
1254 | |||
1255 | static __inline tile_bundle_bits | ||
1256 | create_SrcA_X0(int num) | ||
1257 | { | ||
1258 | const unsigned int n = (unsigned int)num; | ||
1259 | return ((n & 0x3f) << 6); | ||
1260 | } | ||
1261 | |||
1262 | static __inline tile_bundle_bits | ||
1263 | create_SrcA_X1(int num) | ||
1264 | { | ||
1265 | const unsigned int n = (unsigned int)num; | ||
1266 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1267 | } | ||
1268 | |||
1269 | static __inline tile_bundle_bits | ||
1270 | create_SrcA_Y0(int num) | ||
1271 | { | ||
1272 | const unsigned int n = (unsigned int)num; | ||
1273 | return ((n & 0x3f) << 6); | ||
1274 | } | ||
1275 | |||
1276 | static __inline tile_bundle_bits | ||
1277 | create_SrcA_Y1(int num) | ||
1278 | { | ||
1279 | const unsigned int n = (unsigned int)num; | ||
1280 | return (((tile_bundle_bits)(n & 0x3f)) << 37); | ||
1281 | } | ||
1282 | |||
1283 | static __inline tile_bundle_bits | ||
1284 | create_SrcA_Y2(int num) | ||
1285 | { | ||
1286 | const unsigned int n = (unsigned int)num; | ||
1287 | return ((n & 0x00000001) << 26) | | ||
1288 | (((tile_bundle_bits)(n & 0x0000003e)) << 50); | ||
1289 | } | ||
1290 | |||
1291 | static __inline tile_bundle_bits | ||
1292 | create_SrcBDest_Y2(int num) | ||
1293 | { | ||
1294 | const unsigned int n = (unsigned int)num; | ||
1295 | return ((n & 0x3f) << 20); | ||
1296 | } | ||
1297 | |||
1298 | static __inline tile_bundle_bits | ||
1299 | create_SrcB_X0(int num) | ||
1300 | { | ||
1301 | const unsigned int n = (unsigned int)num; | ||
1302 | return ((n & 0x3f) << 12); | ||
1303 | } | ||
1304 | |||
1305 | static __inline tile_bundle_bits | ||
1306 | create_SrcB_X1(int num) | ||
1307 | { | ||
1308 | const unsigned int n = (unsigned int)num; | ||
1309 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1310 | } | ||
1311 | |||
1312 | static __inline tile_bundle_bits | ||
1313 | create_SrcB_Y0(int num) | ||
1314 | { | ||
1315 | const unsigned int n = (unsigned int)num; | ||
1316 | return ((n & 0x3f) << 12); | ||
1317 | } | ||
1318 | |||
1319 | static __inline tile_bundle_bits | ||
1320 | create_SrcB_Y1(int num) | ||
1321 | { | ||
1322 | const unsigned int n = (unsigned int)num; | ||
1323 | return (((tile_bundle_bits)(n & 0x3f)) << 43); | ||
1324 | } | ||
1325 | |||
1326 | static __inline tile_bundle_bits | ||
1327 | create_Src_SN(int num) | ||
1328 | { | ||
1329 | const unsigned int n = (unsigned int)num; | ||
1330 | return ((n & 0x3) << 0); | ||
1331 | } | ||
1332 | |||
1333 | static __inline tile_bundle_bits | ||
1334 | create_UnOpcodeExtension_X0(int num) | ||
1335 | { | ||
1336 | const unsigned int n = (unsigned int)num; | ||
1337 | return ((n & 0x1f) << 12); | ||
1338 | } | ||
1339 | |||
1340 | static __inline tile_bundle_bits | ||
1341 | create_UnOpcodeExtension_X1(int num) | ||
1342 | { | ||
1343 | const unsigned int n = (unsigned int)num; | ||
1344 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1345 | } | ||
1346 | |||
1347 | static __inline tile_bundle_bits | ||
1348 | create_UnOpcodeExtension_Y0(int num) | ||
1349 | { | ||
1350 | const unsigned int n = (unsigned int)num; | ||
1351 | return ((n & 0x1f) << 12); | ||
1352 | } | ||
1353 | |||
1354 | static __inline tile_bundle_bits | ||
1355 | create_UnOpcodeExtension_Y1(int num) | ||
1356 | { | ||
1357 | const unsigned int n = (unsigned int)num; | ||
1358 | return (((tile_bundle_bits)(n & 0x1f)) << 43); | ||
1359 | } | ||
1360 | |||
1361 | static __inline tile_bundle_bits | ||
1362 | create_UnShOpcodeExtension_X0(int num) | ||
1363 | { | ||
1364 | const unsigned int n = (unsigned int)num; | ||
1365 | return ((n & 0x3ff) << 17); | ||
1366 | } | ||
1367 | |||
1368 | static __inline tile_bundle_bits | ||
1369 | create_UnShOpcodeExtension_X1(int num) | ||
1370 | { | ||
1371 | const unsigned int n = (unsigned int)num; | ||
1372 | return (((tile_bundle_bits)(n & 0x3ff)) << 48); | ||
1373 | } | ||
1374 | |||
1375 | static __inline tile_bundle_bits | ||
1376 | create_UnShOpcodeExtension_Y0(int num) | ||
1377 | { | ||
1378 | const unsigned int n = (unsigned int)num; | ||
1379 | return ((n & 0x7) << 17); | ||
1380 | } | ||
1381 | |||
1382 | static __inline tile_bundle_bits | ||
1383 | create_UnShOpcodeExtension_Y1(int num) | ||
1384 | { | ||
1385 | const unsigned int n = (unsigned int)num; | ||
1386 | return (((tile_bundle_bits)(n & 0x7)) << 48); | ||
1387 | } | ||
1388 | |||
1389 | |||
1390 | typedef unsigned short tile_sn_instruction_bits; | ||
1391 | |||
1392 | |||
1393 | typedef enum | ||
1394 | { | ||
1395 | TILE_PIPELINE_X0, | ||
1396 | TILE_PIPELINE_X1, | ||
1397 | TILE_PIPELINE_Y0, | ||
1398 | TILE_PIPELINE_Y1, | ||
1399 | TILE_PIPELINE_Y2, | ||
1400 | } tile_pipeline; | ||
1401 | |||
1402 | #define tile_is_x_pipeline(p) ((int)(p) <= (int)TILE_PIPELINE_X1) | ||
1403 | |||
1404 | typedef enum | ||
1405 | { | ||
1406 | TILE_OP_TYPE_REGISTER, | ||
1407 | TILE_OP_TYPE_IMMEDIATE, | ||
1408 | TILE_OP_TYPE_ADDRESS, | ||
1409 | TILE_OP_TYPE_SPR | ||
1410 | } tile_operand_type; | ||
1411 | |||
1412 | /* This is the bit that determines if a bundle is in the Y encoding. */ | ||
1413 | #define TILE_BUNDLE_Y_ENCODING_MASK ((tile_bundle_bits)1 << 63) | ||
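For illustration (helper name invented here): bit 63 selects between the X and Y encodings, so a bundle can be classified either with this mask or with get_Mode() above.

/* Example only: nonzero if the bundle uses the Y (three-instruction)
 * encoding. */
static __inline int
example_bundle_is_y_encoded(tile_bundle_bits bundle)
{
  return (bundle & TILE_BUNDLE_Y_ENCODING_MASK) != 0;
}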
1414 | |||
1415 | enum | ||
1416 | { | ||
1417 | /* Maximum number of instructions in a bundle (2 for X, 3 for Y). */ | ||
1418 | TILE_MAX_INSTRUCTIONS_PER_BUNDLE = 3, | ||
1419 | |||
1420 | /* How many different pipeline encodings are there? X0, X1, Y0, Y1, Y2. */ | ||
1421 | TILE_NUM_PIPELINE_ENCODINGS = 5, | ||
1422 | |||
1423 | /* Log base 2 of TILE_BUNDLE_SIZE_IN_BYTES. */ | ||
1424 | TILE_LOG2_BUNDLE_SIZE_IN_BYTES = 3, | ||
1425 | |||
1426 | /* Instructions take this many bytes. */ | ||
1427 | TILE_BUNDLE_SIZE_IN_BYTES = 1 << TILE_LOG2_BUNDLE_SIZE_IN_BYTES, | ||
1428 | |||
1429 | /* Log base 2 of TILE_BUNDLE_ALIGNMENT_IN_BYTES. */ | ||
1430 | TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES = 3, | ||
1431 | |||
1432 | /* Bundles should be aligned modulo this number of bytes. */ | ||
1433 | TILE_BUNDLE_ALIGNMENT_IN_BYTES = | ||
1434 | (1 << TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES), | ||
1435 | |||
1436 | /* Log base 2 of TILE_SN_INSTRUCTION_SIZE_IN_BYTES. */ | ||
1437 | TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES = 1, | ||
1438 | |||
1439 | /* Static network instructions take this many bytes. */ | ||
1440 | TILE_SN_INSTRUCTION_SIZE_IN_BYTES = | ||
1441 | (1 << TILE_LOG2_SN_INSTRUCTION_SIZE_IN_BYTES), | ||
1442 | |||
1443 | /* Number of registers (some are magic, such as network I/O). */ | ||
1444 | TILE_NUM_REGISTERS = 64, | ||
1445 | |||
1446 | /* Number of static network registers. */ | ||
1447 | TILE_NUM_SN_REGISTERS = 4 | ||
1448 | }; | ||
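A small sketch combining the constants above (the buffer handling is an assumption of the example): bundles are 8 bytes and 8-byte aligned, so executable text can be walked one bundle at a time.

/* Example only: count Y-encoded bundles in a buffer of executable text. */
static __inline unsigned int
example_count_y_bundles(const tile_bundle_bits *text, unsigned int nbytes)
{
  unsigned int i, count = 0;
  for (i = 0; i < nbytes / TILE_BUNDLE_SIZE_IN_BYTES; i++)
    count += (text[i] & TILE_BUNDLE_Y_ENCODING_MASK) != 0;
  return count;
}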
1449 | |||
1450 | |||
1451 | struct tile_operand | ||
1452 | { | ||
1453 | /* Is this operand a register, immediate or address? */ | ||
1454 | tile_operand_type type; | ||
1455 | |||
1456 | /* The default relocation type for this operand. */ | ||
1457 | signed int default_reloc : 16; | ||
1458 | |||
1459 | /* How many bits is this value? (used for range checking) */ | ||
1460 | unsigned int num_bits : 5; | ||
1461 | |||
1462 | /* Is the value signed? (used for range checking) */ | ||
1463 | unsigned int is_signed : 1; | ||
1464 | |||
1465 | /* Is this operand a source register? */ | ||
1466 | unsigned int is_src_reg : 1; | ||
1467 | |||
1468 | /* Is this operand written? (i.e. is it a destination register) */ | ||
1469 | unsigned int is_dest_reg : 1; | ||
1470 | |||
1471 | /* Is this operand PC-relative? */ | ||
1472 | unsigned int is_pc_relative : 1; | ||
1473 | |||
1474 | /* By how many bits do we right shift the value before inserting? */ | ||
1475 | unsigned int rightshift : 2; | ||
1476 | |||
1477 | /* Return the bits for this operand to be ORed into an existing bundle. */ | ||
1478 | tile_bundle_bits (*insert) (int op); | ||
1479 | |||
1480 | /* Extract this operand and return it. */ | ||
1481 | unsigned int (*extract) (tile_bundle_bits bundle); | ||
1482 | }; | ||
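Usage sketch (the assumption that the operand's old bits are already clear is noted in the comment): the per-operand insert/extract callbacks let field handling be driven by the tile_operands[] table instead of hard-coded shifts.

/* Example only: OR one operand's encoded value into a bundle; assumes the
 * operand's previous bits have already been cleared. */
static __inline tile_bundle_bits
example_patch_operand(tile_bundle_bits bundle,
                      const struct tile_operand *op, int value)
{
  return bundle | op->insert(value);
}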
1483 | |||
1484 | |||
1485 | extern const struct tile_operand tile_operands[]; | ||
1486 | |||
1487 | /* One finite-state machine per pipe for rapid instruction decoding. */ | ||
1488 | extern const unsigned short * const | ||
1489 | tile_bundle_decoder_fsms[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1490 | |||
1491 | |||
1492 | struct tile_opcode | ||
1493 | { | ||
1494 | /* The opcode mnemonic, e.g. "add" */ | ||
1495 | const char *name; | ||
1496 | |||
1497 | /* The enum value for this mnemonic. */ | ||
1498 | tile_mnemonic mnemonic; | ||
1499 | |||
1500 | /* A bit mask of which of the five pipes this instruction | ||
1501 | is compatible with: | ||
1502 | X0 0x01 | ||
1503 | X1 0x02 | ||
1504 | Y0 0x04 | ||
1505 | Y1 0x08 | ||
1506 | Y2 0x10 */ | ||
1507 | unsigned char pipes; | ||
1508 | |||
1509 | /* How many operands are there? */ | ||
1510 | unsigned char num_operands; | ||
1511 | |||
1512 | /* Which register does this write implicitly, or TREG_ZERO if none? */ | ||
1513 | unsigned char implicitly_written_register; | ||
1514 | |||
1515 | /* Can this be bundled with other instructions (almost always true). */ | ||
1516 | unsigned char can_bundle; | ||
1517 | |||
1518 | /* The description of the operands. Each of these is an | ||
1519 | * index into the tile_operands[] table. */ | ||
1520 | unsigned char operands[TILE_NUM_PIPELINE_ENCODINGS][TILE_MAX_OPERANDS]; | ||
1521 | |||
1522 | /* A mask of which bits have predefined values for each pipeline. | ||
1523 | * This is useful for disassembly. */ | ||
1524 | tile_bundle_bits fixed_bit_masks[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1525 | |||
1526 | /* For each bit set in fixed_bit_masks, what the value is for this | ||
1527 | * instruction. */ | ||
1528 | tile_bundle_bits fixed_bit_values[TILE_NUM_PIPELINE_ENCODINGS]; | ||
1529 | }; | ||
1530 | |||
1531 | extern const struct tile_opcode tile_opcodes[]; | ||
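Illustrative check (helper invented here; the 0x02 value comes from the pipe-mask comment in struct tile_opcode): the pipes bitmask records which of the five pipelines an opcode may issue in.

/* Example only: nonzero if this opcode can issue in pipeline X1 (mask 0x02). */
static __inline int
example_opcode_allows_x1(const struct tile_opcode *op)
{
  return (op->pipes & 0x02) != 0;
}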
1532 | |||
1533 | struct tile_sn_opcode | ||
1534 | { | ||
1535 | /* The opcode mnemonic, e.g. "add" */ | ||
1536 | const char *name; | ||
1537 | |||
1538 | /* The enum value for this mnemonic. */ | ||
1539 | tile_sn_mnemonic mnemonic; | ||
1540 | |||
1541 | /* How many operands are there? */ | ||
1542 | unsigned char num_operands; | ||
1543 | |||
1544 | /* The description of the operands. Each of these is an | ||
1545 | * index into the tile_operands[] table. */ | ||
1546 | unsigned char operands[TILE_SN_MAX_OPERANDS]; | ||
1547 | |||
1548 | /* A mask of which bits have predefined values. | ||
1549 | * This is useful for disassembly. */ | ||
1550 | tile_sn_instruction_bits fixed_bit_mask; | ||
1551 | |||
1552 | /* For each bit set in fixed_bit_mask, what its value is. */ | ||
1553 | tile_sn_instruction_bits fixed_bit_values; | ||
1554 | }; | ||
1555 | |||
1556 | extern const struct tile_sn_opcode tile_sn_opcodes[]; | ||
1557 | |||
1558 | /* Used for non-textual disassembly into structs. */ | ||
1559 | struct tile_decoded_instruction | ||
1560 | { | ||
1561 | const struct tile_opcode *opcode; | ||
1562 | const struct tile_operand *operands[TILE_MAX_OPERANDS]; | ||
1563 | int operand_values[TILE_MAX_OPERANDS]; | ||
1564 | }; | ||
1565 | |||
1566 | |||
1567 | /* Disassemble a bundle into a struct for machine processing. */ | ||
1568 | extern int parse_insn_tile(tile_bundle_bits bits, | ||
1569 | unsigned int pc, | ||
1570 | struct tile_decoded_instruction | ||
1571 | decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE]); | ||
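A decoding sketch built on the declaration above (assumption: the return value is treated here as the number of instructions decoded, which the header does not state):

/* Example only: decode a bundle and report how many operands its first
 * instruction carries; returns -1 if nothing was decoded. */
static __inline int
example_first_insn_operand_count(tile_bundle_bits bits, unsigned int pc)
{
  struct tile_decoded_instruction decoded[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
  int n = parse_insn_tile(bits, pc, decoded);
  if (n <= 0)
    return -1;
  return decoded[0].opcode->num_operands;
}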
1572 | |||
1573 | |||
1574 | /* Canonical names of all the registers. */ | ||
1575 | /* ISSUE: This table lives in "tile-dis.c" */ | ||
1576 | extern const char * const tile_register_names[]; | ||
1577 | |||
1578 | /* Descriptor for a special-purpose register. */ | ||
1579 | struct tile_spr | ||
1580 | { | ||
1581 | /* The number */ | ||
1582 | int number; | ||
1583 | |||
1584 | /* The name */ | ||
1585 | const char *name; | ||
1586 | }; | ||
1587 | |||
1588 | /* List of all the SPRs; ordered by increasing number. */ | ||
1589 | extern const struct tile_spr tile_sprs[]; | ||
1590 | |||
1591 | /* Number of special-purpose registers. */ | ||
1592 | extern const int tile_num_sprs; | ||
1593 | |||
1594 | extern const char * | ||
1595 | get_tile_spr_name (int num); | ||
1596 | |||
1597 | #endif /* opcode_tile_h */ | ||
diff --git a/arch/tile/include/asm/opcode_constants.h b/arch/tile/include/asm/opcode_constants.h new file mode 100644 index 000000000000..37a9f2958cb1 --- /dev/null +++ b/arch/tile/include/asm/opcode_constants.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_OPCODE_CONSTANTS_H | ||
16 | #define _ASM_TILE_OPCODE_CONSTANTS_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | |||
20 | #if CHIP_WORD_SIZE() == 64 | ||
21 | #include <asm/opcode_constants_64.h> | ||
22 | #else | ||
23 | #include <asm/opcode_constants_32.h> | ||
24 | #endif | ||
25 | |||
26 | #endif /* _ASM_TILE_OPCODE_CONSTANTS_H */ | ||
diff --git a/arch/tile/include/asm/opcode_constants_32.h b/arch/tile/include/asm/opcode_constants_32.h new file mode 100644 index 000000000000..227d033b180c --- /dev/null +++ b/arch/tile/include/asm/opcode_constants_32.h | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | |||
17 | |||
18 | #ifndef _TILE_OPCODE_CONSTANTS_H | ||
19 | #define _TILE_OPCODE_CONSTANTS_H | ||
20 | enum | ||
21 | { | ||
22 | ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, | ||
23 | ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, | ||
24 | ADDB_SPECIAL_0_OPCODE_X0 = 1, | ||
25 | ADDB_SPECIAL_0_OPCODE_X1 = 1, | ||
26 | ADDHS_SPECIAL_0_OPCODE_X0 = 99, | ||
27 | ADDHS_SPECIAL_0_OPCODE_X1 = 69, | ||
28 | ADDH_SPECIAL_0_OPCODE_X0 = 2, | ||
29 | ADDH_SPECIAL_0_OPCODE_X1 = 2, | ||
30 | ADDIB_IMM_0_OPCODE_X0 = 1, | ||
31 | ADDIB_IMM_0_OPCODE_X1 = 1, | ||
32 | ADDIH_IMM_0_OPCODE_X0 = 2, | ||
33 | ADDIH_IMM_0_OPCODE_X1 = 2, | ||
34 | ADDI_IMM_0_OPCODE_X0 = 3, | ||
35 | ADDI_IMM_0_OPCODE_X1 = 3, | ||
36 | ADDI_IMM_1_OPCODE_SN = 1, | ||
37 | ADDI_OPCODE_Y0 = 9, | ||
38 | ADDI_OPCODE_Y1 = 7, | ||
39 | ADDLIS_OPCODE_X0 = 1, | ||
40 | ADDLIS_OPCODE_X1 = 2, | ||
41 | ADDLI_OPCODE_X0 = 2, | ||
42 | ADDLI_OPCODE_X1 = 3, | ||
43 | ADDS_SPECIAL_0_OPCODE_X0 = 96, | ||
44 | ADDS_SPECIAL_0_OPCODE_X1 = 66, | ||
45 | ADD_SPECIAL_0_OPCODE_X0 = 3, | ||
46 | ADD_SPECIAL_0_OPCODE_X1 = 3, | ||
47 | ADD_SPECIAL_0_OPCODE_Y0 = 0, | ||
48 | ADD_SPECIAL_0_OPCODE_Y1 = 0, | ||
49 | ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, | ||
50 | ADIFFH_SPECIAL_0_OPCODE_X0 = 5, | ||
51 | ANDI_IMM_0_OPCODE_X0 = 1, | ||
52 | ANDI_IMM_0_OPCODE_X1 = 4, | ||
53 | ANDI_OPCODE_Y0 = 10, | ||
54 | ANDI_OPCODE_Y1 = 8, | ||
55 | AND_SPECIAL_0_OPCODE_X0 = 6, | ||
56 | AND_SPECIAL_0_OPCODE_X1 = 4, | ||
57 | AND_SPECIAL_2_OPCODE_Y0 = 0, | ||
58 | AND_SPECIAL_2_OPCODE_Y1 = 0, | ||
59 | AULI_OPCODE_X0 = 3, | ||
60 | AULI_OPCODE_X1 = 4, | ||
61 | AVGB_U_SPECIAL_0_OPCODE_X0 = 7, | ||
62 | AVGH_SPECIAL_0_OPCODE_X0 = 8, | ||
63 | BBNST_BRANCH_OPCODE_X1 = 15, | ||
64 | BBNS_BRANCH_OPCODE_X1 = 14, | ||
65 | BBNS_OPCODE_SN = 63, | ||
66 | BBST_BRANCH_OPCODE_X1 = 13, | ||
67 | BBS_BRANCH_OPCODE_X1 = 12, | ||
68 | BBS_OPCODE_SN = 62, | ||
69 | BGEZT_BRANCH_OPCODE_X1 = 7, | ||
70 | BGEZ_BRANCH_OPCODE_X1 = 6, | ||
71 | BGEZ_OPCODE_SN = 61, | ||
72 | BGZT_BRANCH_OPCODE_X1 = 5, | ||
73 | BGZ_BRANCH_OPCODE_X1 = 4, | ||
74 | BGZ_OPCODE_SN = 58, | ||
75 | BITX_UN_0_SHUN_0_OPCODE_X0 = 1, | ||
76 | BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, | ||
77 | BLEZT_BRANCH_OPCODE_X1 = 11, | ||
78 | BLEZ_BRANCH_OPCODE_X1 = 10, | ||
79 | BLEZ_OPCODE_SN = 59, | ||
80 | BLZT_BRANCH_OPCODE_X1 = 9, | ||
81 | BLZ_BRANCH_OPCODE_X1 = 8, | ||
82 | BLZ_OPCODE_SN = 60, | ||
83 | BNZT_BRANCH_OPCODE_X1 = 3, | ||
84 | BNZ_BRANCH_OPCODE_X1 = 2, | ||
85 | BNZ_OPCODE_SN = 57, | ||
86 | BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, | ||
87 | BRANCH_OPCODE_X1 = 5, | ||
88 | BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, | ||
89 | BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, | ||
90 | BZT_BRANCH_OPCODE_X1 = 1, | ||
91 | BZ_BRANCH_OPCODE_X1 = 0, | ||
92 | BZ_OPCODE_SN = 56, | ||
93 | CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, | ||
94 | CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, | ||
95 | CRC32_32_SPECIAL_0_OPCODE_X0 = 9, | ||
96 | CRC32_8_SPECIAL_0_OPCODE_X0 = 10, | ||
97 | CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, | ||
98 | CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, | ||
99 | DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, | ||
100 | DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, | ||
101 | DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, | ||
102 | FINV_UN_0_SHUN_0_OPCODE_X1 = 3, | ||
103 | FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, | ||
104 | FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, | ||
105 | FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, | ||
106 | FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, | ||
107 | FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
108 | FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, | ||
109 | HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
110 | ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, | ||
111 | ILL_UN_0_SHUN_0_OPCODE_X1 = 7, | ||
112 | ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, | ||
113 | IMM_0_OPCODE_SN = 0, | ||
114 | IMM_0_OPCODE_X0 = 4, | ||
115 | IMM_0_OPCODE_X1 = 6, | ||
116 | IMM_1_OPCODE_SN = 1, | ||
117 | IMM_OPCODE_0_X0 = 5, | ||
118 | INTHB_SPECIAL_0_OPCODE_X0 = 11, | ||
119 | INTHB_SPECIAL_0_OPCODE_X1 = 5, | ||
120 | INTHH_SPECIAL_0_OPCODE_X0 = 12, | ||
121 | INTHH_SPECIAL_0_OPCODE_X1 = 6, | ||
122 | INTLB_SPECIAL_0_OPCODE_X0 = 13, | ||
123 | INTLB_SPECIAL_0_OPCODE_X1 = 7, | ||
124 | INTLH_SPECIAL_0_OPCODE_X0 = 14, | ||
125 | INTLH_SPECIAL_0_OPCODE_X1 = 8, | ||
126 | INV_UN_0_SHUN_0_OPCODE_X1 = 8, | ||
127 | IRET_UN_0_SHUN_0_OPCODE_X1 = 9, | ||
128 | JALB_OPCODE_X1 = 13, | ||
129 | JALF_OPCODE_X1 = 12, | ||
130 | JALRP_SPECIAL_0_OPCODE_X1 = 9, | ||
131 | JALRR_IMM_1_OPCODE_SN = 3, | ||
132 | JALR_RR_IMM_0_OPCODE_SN = 5, | ||
133 | JALR_SPECIAL_0_OPCODE_X1 = 10, | ||
134 | JB_OPCODE_X1 = 11, | ||
135 | JF_OPCODE_X1 = 10, | ||
136 | JRP_SPECIAL_0_OPCODE_X1 = 11, | ||
137 | JRR_IMM_1_OPCODE_SN = 2, | ||
138 | JR_RR_IMM_0_OPCODE_SN = 4, | ||
139 | JR_SPECIAL_0_OPCODE_X1 = 12, | ||
140 | LBADD_IMM_0_OPCODE_X1 = 22, | ||
141 | LBADD_U_IMM_0_OPCODE_X1 = 23, | ||
142 | LB_OPCODE_Y2 = 0, | ||
143 | LB_UN_0_SHUN_0_OPCODE_X1 = 10, | ||
144 | LB_U_OPCODE_Y2 = 1, | ||
145 | LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, | ||
146 | LHADD_IMM_0_OPCODE_X1 = 24, | ||
147 | LHADD_U_IMM_0_OPCODE_X1 = 25, | ||
148 | LH_OPCODE_Y2 = 2, | ||
149 | LH_UN_0_SHUN_0_OPCODE_X1 = 12, | ||
150 | LH_U_OPCODE_Y2 = 3, | ||
151 | LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, | ||
152 | LNK_SPECIAL_0_OPCODE_X1 = 13, | ||
153 | LWADD_IMM_0_OPCODE_X1 = 26, | ||
154 | LWADD_NA_IMM_0_OPCODE_X1 = 27, | ||
155 | LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, | ||
156 | LW_OPCODE_Y2 = 4, | ||
157 | LW_UN_0_SHUN_0_OPCODE_X1 = 14, | ||
158 | MAXB_U_SPECIAL_0_OPCODE_X0 = 15, | ||
159 | MAXB_U_SPECIAL_0_OPCODE_X1 = 14, | ||
160 | MAXH_SPECIAL_0_OPCODE_X0 = 16, | ||
161 | MAXH_SPECIAL_0_OPCODE_X1 = 15, | ||
162 | MAXIB_U_IMM_0_OPCODE_X0 = 4, | ||
163 | MAXIB_U_IMM_0_OPCODE_X1 = 5, | ||
164 | MAXIH_IMM_0_OPCODE_X0 = 5, | ||
165 | MAXIH_IMM_0_OPCODE_X1 = 6, | ||
166 | MFSPR_IMM_0_OPCODE_X1 = 7, | ||
167 | MF_UN_0_SHUN_0_OPCODE_X1 = 15, | ||
168 | MINB_U_SPECIAL_0_OPCODE_X0 = 17, | ||
169 | MINB_U_SPECIAL_0_OPCODE_X1 = 16, | ||
170 | MINH_SPECIAL_0_OPCODE_X0 = 18, | ||
171 | MINH_SPECIAL_0_OPCODE_X1 = 17, | ||
172 | MINIB_U_IMM_0_OPCODE_X0 = 6, | ||
173 | MINIB_U_IMM_0_OPCODE_X1 = 8, | ||
174 | MINIH_IMM_0_OPCODE_X0 = 7, | ||
175 | MINIH_IMM_0_OPCODE_X1 = 9, | ||
176 | MM_OPCODE_X0 = 6, | ||
177 | MM_OPCODE_X1 = 7, | ||
178 | MNZB_SPECIAL_0_OPCODE_X0 = 19, | ||
179 | MNZB_SPECIAL_0_OPCODE_X1 = 18, | ||
180 | MNZH_SPECIAL_0_OPCODE_X0 = 20, | ||
181 | MNZH_SPECIAL_0_OPCODE_X1 = 19, | ||
182 | MNZ_SPECIAL_0_OPCODE_X0 = 21, | ||
183 | MNZ_SPECIAL_0_OPCODE_X1 = 20, | ||
184 | MNZ_SPECIAL_1_OPCODE_Y0 = 0, | ||
185 | MNZ_SPECIAL_1_OPCODE_Y1 = 1, | ||
186 | MOVEI_IMM_1_OPCODE_SN = 0, | ||
187 | MOVE_RR_IMM_0_OPCODE_SN = 8, | ||
188 | MTSPR_IMM_0_OPCODE_X1 = 10, | ||
189 | MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, | ||
190 | MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, | ||
191 | MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, | ||
192 | MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, | ||
193 | MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, | ||
194 | MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, | ||
195 | MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, | ||
196 | MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, | ||
197 | MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, | ||
198 | MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, | ||
199 | MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, | ||
200 | MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, | ||
201 | MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, | ||
202 | MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, | ||
203 | MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, | ||
204 | MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, | ||
205 | MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, | ||
206 | MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, | ||
207 | MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, | ||
208 | MULHL_US_SPECIAL_0_OPCODE_X0 = 36, | ||
209 | MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, | ||
210 | MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, | ||
211 | MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, | ||
212 | MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, | ||
213 | MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, | ||
214 | MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, | ||
215 | MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, | ||
216 | MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, | ||
217 | MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, | ||
218 | MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, | ||
219 | MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, | ||
220 | MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, | ||
221 | MVNZ_SPECIAL_0_OPCODE_X0 = 45, | ||
222 | MVNZ_SPECIAL_1_OPCODE_Y0 = 1, | ||
223 | MVZ_SPECIAL_0_OPCODE_X0 = 46, | ||
224 | MVZ_SPECIAL_1_OPCODE_Y0 = 2, | ||
225 | MZB_SPECIAL_0_OPCODE_X0 = 47, | ||
226 | MZB_SPECIAL_0_OPCODE_X1 = 21, | ||
227 | MZH_SPECIAL_0_OPCODE_X0 = 48, | ||
228 | MZH_SPECIAL_0_OPCODE_X1 = 22, | ||
229 | MZ_SPECIAL_0_OPCODE_X0 = 49, | ||
230 | MZ_SPECIAL_0_OPCODE_X1 = 23, | ||
231 | MZ_SPECIAL_1_OPCODE_Y0 = 3, | ||
232 | MZ_SPECIAL_1_OPCODE_Y1 = 2, | ||
233 | NAP_UN_0_SHUN_0_OPCODE_X1 = 16, | ||
234 | NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, | ||
235 | NOP_UN_0_SHUN_0_OPCODE_X0 = 6, | ||
236 | NOP_UN_0_SHUN_0_OPCODE_X1 = 17, | ||
237 | NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, | ||
238 | NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, | ||
239 | NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
240 | NOR_SPECIAL_0_OPCODE_X0 = 50, | ||
241 | NOR_SPECIAL_0_OPCODE_X1 = 24, | ||
242 | NOR_SPECIAL_2_OPCODE_Y0 = 1, | ||
243 | NOR_SPECIAL_2_OPCODE_Y1 = 1, | ||
244 | ORI_IMM_0_OPCODE_X0 = 8, | ||
245 | ORI_IMM_0_OPCODE_X1 = 11, | ||
246 | ORI_OPCODE_Y0 = 11, | ||
247 | ORI_OPCODE_Y1 = 9, | ||
248 | OR_SPECIAL_0_OPCODE_X0 = 51, | ||
249 | OR_SPECIAL_0_OPCODE_X1 = 25, | ||
250 | OR_SPECIAL_2_OPCODE_Y0 = 2, | ||
251 | OR_SPECIAL_2_OPCODE_Y1 = 2, | ||
252 | PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, | ||
253 | PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, | ||
254 | PACKHB_SPECIAL_0_OPCODE_X0 = 52, | ||
255 | PACKHB_SPECIAL_0_OPCODE_X1 = 26, | ||
256 | PACKHS_SPECIAL_0_OPCODE_X0 = 102, | ||
257 | PACKHS_SPECIAL_0_OPCODE_X1 = 72, | ||
258 | PACKLB_SPECIAL_0_OPCODE_X0 = 53, | ||
259 | PACKLB_SPECIAL_0_OPCODE_X1 = 27, | ||
260 | PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, | ||
261 | PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, | ||
262 | RLI_SHUN_0_OPCODE_X0 = 1, | ||
263 | RLI_SHUN_0_OPCODE_X1 = 1, | ||
264 | RLI_SHUN_0_OPCODE_Y0 = 1, | ||
265 | RLI_SHUN_0_OPCODE_Y1 = 1, | ||
266 | RL_SPECIAL_0_OPCODE_X0 = 54, | ||
267 | RL_SPECIAL_0_OPCODE_X1 = 28, | ||
268 | RL_SPECIAL_3_OPCODE_Y0 = 0, | ||
269 | RL_SPECIAL_3_OPCODE_Y1 = 0, | ||
270 | RR_IMM_0_OPCODE_SN = 0, | ||
271 | S1A_SPECIAL_0_OPCODE_X0 = 55, | ||
272 | S1A_SPECIAL_0_OPCODE_X1 = 29, | ||
273 | S1A_SPECIAL_0_OPCODE_Y0 = 1, | ||
274 | S1A_SPECIAL_0_OPCODE_Y1 = 1, | ||
275 | S2A_SPECIAL_0_OPCODE_X0 = 56, | ||
276 | S2A_SPECIAL_0_OPCODE_X1 = 30, | ||
277 | S2A_SPECIAL_0_OPCODE_Y0 = 2, | ||
278 | S2A_SPECIAL_0_OPCODE_Y1 = 2, | ||
279 | S3A_SPECIAL_0_OPCODE_X0 = 57, | ||
280 | S3A_SPECIAL_0_OPCODE_X1 = 31, | ||
281 | S3A_SPECIAL_5_OPCODE_Y0 = 1, | ||
282 | S3A_SPECIAL_5_OPCODE_Y1 = 1, | ||
283 | SADAB_U_SPECIAL_0_OPCODE_X0 = 58, | ||
284 | SADAH_SPECIAL_0_OPCODE_X0 = 59, | ||
285 | SADAH_U_SPECIAL_0_OPCODE_X0 = 60, | ||
286 | SADB_U_SPECIAL_0_OPCODE_X0 = 61, | ||
287 | SADH_SPECIAL_0_OPCODE_X0 = 62, | ||
288 | SADH_U_SPECIAL_0_OPCODE_X0 = 63, | ||
289 | SBADD_IMM_0_OPCODE_X1 = 28, | ||
290 | SB_OPCODE_Y2 = 5, | ||
291 | SB_SPECIAL_0_OPCODE_X1 = 32, | ||
292 | SEQB_SPECIAL_0_OPCODE_X0 = 64, | ||
293 | SEQB_SPECIAL_0_OPCODE_X1 = 33, | ||
294 | SEQH_SPECIAL_0_OPCODE_X0 = 65, | ||
295 | SEQH_SPECIAL_0_OPCODE_X1 = 34, | ||
296 | SEQIB_IMM_0_OPCODE_X0 = 9, | ||
297 | SEQIB_IMM_0_OPCODE_X1 = 12, | ||
298 | SEQIH_IMM_0_OPCODE_X0 = 10, | ||
299 | SEQIH_IMM_0_OPCODE_X1 = 13, | ||
300 | SEQI_IMM_0_OPCODE_X0 = 11, | ||
301 | SEQI_IMM_0_OPCODE_X1 = 14, | ||
302 | SEQI_OPCODE_Y0 = 12, | ||
303 | SEQI_OPCODE_Y1 = 10, | ||
304 | SEQ_SPECIAL_0_OPCODE_X0 = 66, | ||
305 | SEQ_SPECIAL_0_OPCODE_X1 = 35, | ||
306 | SEQ_SPECIAL_5_OPCODE_Y0 = 2, | ||
307 | SEQ_SPECIAL_5_OPCODE_Y1 = 2, | ||
308 | SHADD_IMM_0_OPCODE_X1 = 29, | ||
309 | SHL8II_IMM_0_OPCODE_SN = 3, | ||
310 | SHLB_SPECIAL_0_OPCODE_X0 = 67, | ||
311 | SHLB_SPECIAL_0_OPCODE_X1 = 36, | ||
312 | SHLH_SPECIAL_0_OPCODE_X0 = 68, | ||
313 | SHLH_SPECIAL_0_OPCODE_X1 = 37, | ||
314 | SHLIB_SHUN_0_OPCODE_X0 = 2, | ||
315 | SHLIB_SHUN_0_OPCODE_X1 = 2, | ||
316 | SHLIH_SHUN_0_OPCODE_X0 = 3, | ||
317 | SHLIH_SHUN_0_OPCODE_X1 = 3, | ||
318 | SHLI_SHUN_0_OPCODE_X0 = 4, | ||
319 | SHLI_SHUN_0_OPCODE_X1 = 4, | ||
320 | SHLI_SHUN_0_OPCODE_Y0 = 2, | ||
321 | SHLI_SHUN_0_OPCODE_Y1 = 2, | ||
322 | SHL_SPECIAL_0_OPCODE_X0 = 69, | ||
323 | SHL_SPECIAL_0_OPCODE_X1 = 38, | ||
324 | SHL_SPECIAL_3_OPCODE_Y0 = 1, | ||
325 | SHL_SPECIAL_3_OPCODE_Y1 = 1, | ||
326 | SHR1_RR_IMM_0_OPCODE_SN = 9, | ||
327 | SHRB_SPECIAL_0_OPCODE_X0 = 70, | ||
328 | SHRB_SPECIAL_0_OPCODE_X1 = 39, | ||
329 | SHRH_SPECIAL_0_OPCODE_X0 = 71, | ||
330 | SHRH_SPECIAL_0_OPCODE_X1 = 40, | ||
331 | SHRIB_SHUN_0_OPCODE_X0 = 5, | ||
332 | SHRIB_SHUN_0_OPCODE_X1 = 5, | ||
333 | SHRIH_SHUN_0_OPCODE_X0 = 6, | ||
334 | SHRIH_SHUN_0_OPCODE_X1 = 6, | ||
335 | SHRI_SHUN_0_OPCODE_X0 = 7, | ||
336 | SHRI_SHUN_0_OPCODE_X1 = 7, | ||
337 | SHRI_SHUN_0_OPCODE_Y0 = 3, | ||
338 | SHRI_SHUN_0_OPCODE_Y1 = 3, | ||
339 | SHR_SPECIAL_0_OPCODE_X0 = 72, | ||
340 | SHR_SPECIAL_0_OPCODE_X1 = 41, | ||
341 | SHR_SPECIAL_3_OPCODE_Y0 = 2, | ||
342 | SHR_SPECIAL_3_OPCODE_Y1 = 2, | ||
343 | SHUN_0_OPCODE_X0 = 7, | ||
344 | SHUN_0_OPCODE_X1 = 8, | ||
345 | SHUN_0_OPCODE_Y0 = 13, | ||
346 | SHUN_0_OPCODE_Y1 = 11, | ||
347 | SH_OPCODE_Y2 = 6, | ||
348 | SH_SPECIAL_0_OPCODE_X1 = 42, | ||
349 | SLTB_SPECIAL_0_OPCODE_X0 = 73, | ||
350 | SLTB_SPECIAL_0_OPCODE_X1 = 43, | ||
351 | SLTB_U_SPECIAL_0_OPCODE_X0 = 74, | ||
352 | SLTB_U_SPECIAL_0_OPCODE_X1 = 44, | ||
353 | SLTEB_SPECIAL_0_OPCODE_X0 = 75, | ||
354 | SLTEB_SPECIAL_0_OPCODE_X1 = 45, | ||
355 | SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, | ||
356 | SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, | ||
357 | SLTEH_SPECIAL_0_OPCODE_X0 = 77, | ||
358 | SLTEH_SPECIAL_0_OPCODE_X1 = 47, | ||
359 | SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, | ||
360 | SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, | ||
361 | SLTE_SPECIAL_0_OPCODE_X0 = 79, | ||
362 | SLTE_SPECIAL_0_OPCODE_X1 = 49, | ||
363 | SLTE_SPECIAL_4_OPCODE_Y0 = 0, | ||
364 | SLTE_SPECIAL_4_OPCODE_Y1 = 0, | ||
365 | SLTE_U_SPECIAL_0_OPCODE_X0 = 80, | ||
366 | SLTE_U_SPECIAL_0_OPCODE_X1 = 50, | ||
367 | SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, | ||
368 | SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, | ||
369 | SLTH_SPECIAL_0_OPCODE_X0 = 81, | ||
370 | SLTH_SPECIAL_0_OPCODE_X1 = 51, | ||
371 | SLTH_U_SPECIAL_0_OPCODE_X0 = 82, | ||
372 | SLTH_U_SPECIAL_0_OPCODE_X1 = 52, | ||
373 | SLTIB_IMM_0_OPCODE_X0 = 12, | ||
374 | SLTIB_IMM_0_OPCODE_X1 = 15, | ||
375 | SLTIB_U_IMM_0_OPCODE_X0 = 13, | ||
376 | SLTIB_U_IMM_0_OPCODE_X1 = 16, | ||
377 | SLTIH_IMM_0_OPCODE_X0 = 14, | ||
378 | SLTIH_IMM_0_OPCODE_X1 = 17, | ||
379 | SLTIH_U_IMM_0_OPCODE_X0 = 15, | ||
380 | SLTIH_U_IMM_0_OPCODE_X1 = 18, | ||
381 | SLTI_IMM_0_OPCODE_X0 = 16, | ||
382 | SLTI_IMM_0_OPCODE_X1 = 19, | ||
383 | SLTI_OPCODE_Y0 = 14, | ||
384 | SLTI_OPCODE_Y1 = 12, | ||
385 | SLTI_U_IMM_0_OPCODE_X0 = 17, | ||
386 | SLTI_U_IMM_0_OPCODE_X1 = 20, | ||
387 | SLTI_U_OPCODE_Y0 = 15, | ||
388 | SLTI_U_OPCODE_Y1 = 13, | ||
389 | SLT_SPECIAL_0_OPCODE_X0 = 83, | ||
390 | SLT_SPECIAL_0_OPCODE_X1 = 53, | ||
391 | SLT_SPECIAL_4_OPCODE_Y0 = 2, | ||
392 | SLT_SPECIAL_4_OPCODE_Y1 = 2, | ||
393 | SLT_U_SPECIAL_0_OPCODE_X0 = 84, | ||
394 | SLT_U_SPECIAL_0_OPCODE_X1 = 54, | ||
395 | SLT_U_SPECIAL_4_OPCODE_Y0 = 3, | ||
396 | SLT_U_SPECIAL_4_OPCODE_Y1 = 3, | ||
397 | SNEB_SPECIAL_0_OPCODE_X0 = 85, | ||
398 | SNEB_SPECIAL_0_OPCODE_X1 = 55, | ||
399 | SNEH_SPECIAL_0_OPCODE_X0 = 86, | ||
400 | SNEH_SPECIAL_0_OPCODE_X1 = 56, | ||
401 | SNE_SPECIAL_0_OPCODE_X0 = 87, | ||
402 | SNE_SPECIAL_0_OPCODE_X1 = 57, | ||
403 | SNE_SPECIAL_5_OPCODE_Y0 = 3, | ||
404 | SNE_SPECIAL_5_OPCODE_Y1 = 3, | ||
405 | SPECIAL_0_OPCODE_X0 = 0, | ||
406 | SPECIAL_0_OPCODE_X1 = 1, | ||
407 | SPECIAL_0_OPCODE_Y0 = 1, | ||
408 | SPECIAL_0_OPCODE_Y1 = 1, | ||
409 | SPECIAL_1_OPCODE_Y0 = 2, | ||
410 | SPECIAL_1_OPCODE_Y1 = 2, | ||
411 | SPECIAL_2_OPCODE_Y0 = 3, | ||
412 | SPECIAL_2_OPCODE_Y1 = 3, | ||
413 | SPECIAL_3_OPCODE_Y0 = 4, | ||
414 | SPECIAL_3_OPCODE_Y1 = 4, | ||
415 | SPECIAL_4_OPCODE_Y0 = 5, | ||
416 | SPECIAL_4_OPCODE_Y1 = 5, | ||
417 | SPECIAL_5_OPCODE_Y0 = 6, | ||
418 | SPECIAL_5_OPCODE_Y1 = 6, | ||
419 | SPECIAL_6_OPCODE_Y0 = 7, | ||
420 | SPECIAL_7_OPCODE_Y0 = 8, | ||
421 | SRAB_SPECIAL_0_OPCODE_X0 = 88, | ||
422 | SRAB_SPECIAL_0_OPCODE_X1 = 58, | ||
423 | SRAH_SPECIAL_0_OPCODE_X0 = 89, | ||
424 | SRAH_SPECIAL_0_OPCODE_X1 = 59, | ||
425 | SRAIB_SHUN_0_OPCODE_X0 = 8, | ||
426 | SRAIB_SHUN_0_OPCODE_X1 = 8, | ||
427 | SRAIH_SHUN_0_OPCODE_X0 = 9, | ||
428 | SRAIH_SHUN_0_OPCODE_X1 = 9, | ||
429 | SRAI_SHUN_0_OPCODE_X0 = 10, | ||
430 | SRAI_SHUN_0_OPCODE_X1 = 10, | ||
431 | SRAI_SHUN_0_OPCODE_Y0 = 4, | ||
432 | SRAI_SHUN_0_OPCODE_Y1 = 4, | ||
433 | SRA_SPECIAL_0_OPCODE_X0 = 90, | ||
434 | SRA_SPECIAL_0_OPCODE_X1 = 60, | ||
435 | SRA_SPECIAL_3_OPCODE_Y0 = 3, | ||
436 | SRA_SPECIAL_3_OPCODE_Y1 = 3, | ||
437 | SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, | ||
438 | SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, | ||
439 | SUBB_SPECIAL_0_OPCODE_X0 = 91, | ||
440 | SUBB_SPECIAL_0_OPCODE_X1 = 61, | ||
441 | SUBHS_SPECIAL_0_OPCODE_X0 = 101, | ||
442 | SUBHS_SPECIAL_0_OPCODE_X1 = 71, | ||
443 | SUBH_SPECIAL_0_OPCODE_X0 = 92, | ||
444 | SUBH_SPECIAL_0_OPCODE_X1 = 62, | ||
445 | SUBS_SPECIAL_0_OPCODE_X0 = 97, | ||
446 | SUBS_SPECIAL_0_OPCODE_X1 = 67, | ||
447 | SUB_SPECIAL_0_OPCODE_X0 = 93, | ||
448 | SUB_SPECIAL_0_OPCODE_X1 = 63, | ||
449 | SUB_SPECIAL_0_OPCODE_Y0 = 3, | ||
450 | SUB_SPECIAL_0_OPCODE_Y1 = 3, | ||
451 | SWADD_IMM_0_OPCODE_X1 = 30, | ||
452 | SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, | ||
453 | SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, | ||
454 | SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, | ||
455 | SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, | ||
456 | SW_OPCODE_Y2 = 7, | ||
457 | SW_SPECIAL_0_OPCODE_X1 = 64, | ||
458 | TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, | ||
459 | TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, | ||
460 | TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, | ||
461 | TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, | ||
462 | TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, | ||
463 | TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, | ||
464 | TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, | ||
465 | TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, | ||
466 | TNS_UN_0_SHUN_0_OPCODE_X1 = 22, | ||
467 | UN_0_SHUN_0_OPCODE_X0 = 11, | ||
468 | UN_0_SHUN_0_OPCODE_X1 = 11, | ||
469 | UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
470 | UN_0_SHUN_0_OPCODE_Y1 = 5, | ||
471 | WH64_UN_0_SHUN_0_OPCODE_X1 = 23, | ||
472 | XORI_IMM_0_OPCODE_X0 = 2, | ||
473 | XORI_IMM_0_OPCODE_X1 = 21, | ||
474 | XOR_SPECIAL_0_OPCODE_X0 = 94, | ||
475 | XOR_SPECIAL_0_OPCODE_X1 = 65, | ||
476 | XOR_SPECIAL_2_OPCODE_Y0 = 3, | ||
477 | XOR_SPECIAL_2_OPCODE_Y1 = 3 | ||
478 | }; | ||
479 | |||
480 | #endif /* !_TILE_OPCODE_CONSTANTS_H */ | ||
diff --git a/arch/tile/include/asm/opcode_constants_64.h b/arch/tile/include/asm/opcode_constants_64.h new file mode 100644 index 000000000000..227d033b180c --- /dev/null +++ b/arch/tile/include/asm/opcode_constants_64.h | |||
@@ -0,0 +1,480 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /* This file is machine-generated; DO NOT EDIT! */ | ||
16 | |||
17 | |||
18 | #ifndef _TILE_OPCODE_CONSTANTS_H | ||
19 | #define _TILE_OPCODE_CONSTANTS_H | ||
20 | enum | ||
21 | { | ||
22 | ADDBS_U_SPECIAL_0_OPCODE_X0 = 98, | ||
23 | ADDBS_U_SPECIAL_0_OPCODE_X1 = 68, | ||
24 | ADDB_SPECIAL_0_OPCODE_X0 = 1, | ||
25 | ADDB_SPECIAL_0_OPCODE_X1 = 1, | ||
26 | ADDHS_SPECIAL_0_OPCODE_X0 = 99, | ||
27 | ADDHS_SPECIAL_0_OPCODE_X1 = 69, | ||
28 | ADDH_SPECIAL_0_OPCODE_X0 = 2, | ||
29 | ADDH_SPECIAL_0_OPCODE_X1 = 2, | ||
30 | ADDIB_IMM_0_OPCODE_X0 = 1, | ||
31 | ADDIB_IMM_0_OPCODE_X1 = 1, | ||
32 | ADDIH_IMM_0_OPCODE_X0 = 2, | ||
33 | ADDIH_IMM_0_OPCODE_X1 = 2, | ||
34 | ADDI_IMM_0_OPCODE_X0 = 3, | ||
35 | ADDI_IMM_0_OPCODE_X1 = 3, | ||
36 | ADDI_IMM_1_OPCODE_SN = 1, | ||
37 | ADDI_OPCODE_Y0 = 9, | ||
38 | ADDI_OPCODE_Y1 = 7, | ||
39 | ADDLIS_OPCODE_X0 = 1, | ||
40 | ADDLIS_OPCODE_X1 = 2, | ||
41 | ADDLI_OPCODE_X0 = 2, | ||
42 | ADDLI_OPCODE_X1 = 3, | ||
43 | ADDS_SPECIAL_0_OPCODE_X0 = 96, | ||
44 | ADDS_SPECIAL_0_OPCODE_X1 = 66, | ||
45 | ADD_SPECIAL_0_OPCODE_X0 = 3, | ||
46 | ADD_SPECIAL_0_OPCODE_X1 = 3, | ||
47 | ADD_SPECIAL_0_OPCODE_Y0 = 0, | ||
48 | ADD_SPECIAL_0_OPCODE_Y1 = 0, | ||
49 | ADIFFB_U_SPECIAL_0_OPCODE_X0 = 4, | ||
50 | ADIFFH_SPECIAL_0_OPCODE_X0 = 5, | ||
51 | ANDI_IMM_0_OPCODE_X0 = 1, | ||
52 | ANDI_IMM_0_OPCODE_X1 = 4, | ||
53 | ANDI_OPCODE_Y0 = 10, | ||
54 | ANDI_OPCODE_Y1 = 8, | ||
55 | AND_SPECIAL_0_OPCODE_X0 = 6, | ||
56 | AND_SPECIAL_0_OPCODE_X1 = 4, | ||
57 | AND_SPECIAL_2_OPCODE_Y0 = 0, | ||
58 | AND_SPECIAL_2_OPCODE_Y1 = 0, | ||
59 | AULI_OPCODE_X0 = 3, | ||
60 | AULI_OPCODE_X1 = 4, | ||
61 | AVGB_U_SPECIAL_0_OPCODE_X0 = 7, | ||
62 | AVGH_SPECIAL_0_OPCODE_X0 = 8, | ||
63 | BBNST_BRANCH_OPCODE_X1 = 15, | ||
64 | BBNS_BRANCH_OPCODE_X1 = 14, | ||
65 | BBNS_OPCODE_SN = 63, | ||
66 | BBST_BRANCH_OPCODE_X1 = 13, | ||
67 | BBS_BRANCH_OPCODE_X1 = 12, | ||
68 | BBS_OPCODE_SN = 62, | ||
69 | BGEZT_BRANCH_OPCODE_X1 = 7, | ||
70 | BGEZ_BRANCH_OPCODE_X1 = 6, | ||
71 | BGEZ_OPCODE_SN = 61, | ||
72 | BGZT_BRANCH_OPCODE_X1 = 5, | ||
73 | BGZ_BRANCH_OPCODE_X1 = 4, | ||
74 | BGZ_OPCODE_SN = 58, | ||
75 | BITX_UN_0_SHUN_0_OPCODE_X0 = 1, | ||
76 | BITX_UN_0_SHUN_0_OPCODE_Y0 = 1, | ||
77 | BLEZT_BRANCH_OPCODE_X1 = 11, | ||
78 | BLEZ_BRANCH_OPCODE_X1 = 10, | ||
79 | BLEZ_OPCODE_SN = 59, | ||
80 | BLZT_BRANCH_OPCODE_X1 = 9, | ||
81 | BLZ_BRANCH_OPCODE_X1 = 8, | ||
82 | BLZ_OPCODE_SN = 60, | ||
83 | BNZT_BRANCH_OPCODE_X1 = 3, | ||
84 | BNZ_BRANCH_OPCODE_X1 = 2, | ||
85 | BNZ_OPCODE_SN = 57, | ||
86 | BPT_NOREG_RR_IMM_0_OPCODE_SN = 1, | ||
87 | BRANCH_OPCODE_X1 = 5, | ||
88 | BYTEX_UN_0_SHUN_0_OPCODE_X0 = 2, | ||
89 | BYTEX_UN_0_SHUN_0_OPCODE_Y0 = 2, | ||
90 | BZT_BRANCH_OPCODE_X1 = 1, | ||
91 | BZ_BRANCH_OPCODE_X1 = 0, | ||
92 | BZ_OPCODE_SN = 56, | ||
93 | CLZ_UN_0_SHUN_0_OPCODE_X0 = 3, | ||
94 | CLZ_UN_0_SHUN_0_OPCODE_Y0 = 3, | ||
95 | CRC32_32_SPECIAL_0_OPCODE_X0 = 9, | ||
96 | CRC32_8_SPECIAL_0_OPCODE_X0 = 10, | ||
97 | CTZ_UN_0_SHUN_0_OPCODE_X0 = 4, | ||
98 | CTZ_UN_0_SHUN_0_OPCODE_Y0 = 4, | ||
99 | DRAIN_UN_0_SHUN_0_OPCODE_X1 = 1, | ||
100 | DTLBPR_UN_0_SHUN_0_OPCODE_X1 = 2, | ||
101 | DWORD_ALIGN_SPECIAL_0_OPCODE_X0 = 95, | ||
102 | FINV_UN_0_SHUN_0_OPCODE_X1 = 3, | ||
103 | FLUSH_UN_0_SHUN_0_OPCODE_X1 = 4, | ||
104 | FNOP_NOREG_RR_IMM_0_OPCODE_SN = 3, | ||
105 | FNOP_UN_0_SHUN_0_OPCODE_X0 = 5, | ||
106 | FNOP_UN_0_SHUN_0_OPCODE_X1 = 5, | ||
107 | FNOP_UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
108 | FNOP_UN_0_SHUN_0_OPCODE_Y1 = 1, | ||
109 | HALT_NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
110 | ICOH_UN_0_SHUN_0_OPCODE_X1 = 6, | ||
111 | ILL_UN_0_SHUN_0_OPCODE_X1 = 7, | ||
112 | ILL_UN_0_SHUN_0_OPCODE_Y1 = 2, | ||
113 | IMM_0_OPCODE_SN = 0, | ||
114 | IMM_0_OPCODE_X0 = 4, | ||
115 | IMM_0_OPCODE_X1 = 6, | ||
116 | IMM_1_OPCODE_SN = 1, | ||
117 | IMM_OPCODE_0_X0 = 5, | ||
118 | INTHB_SPECIAL_0_OPCODE_X0 = 11, | ||
119 | INTHB_SPECIAL_0_OPCODE_X1 = 5, | ||
120 | INTHH_SPECIAL_0_OPCODE_X0 = 12, | ||
121 | INTHH_SPECIAL_0_OPCODE_X1 = 6, | ||
122 | INTLB_SPECIAL_0_OPCODE_X0 = 13, | ||
123 | INTLB_SPECIAL_0_OPCODE_X1 = 7, | ||
124 | INTLH_SPECIAL_0_OPCODE_X0 = 14, | ||
125 | INTLH_SPECIAL_0_OPCODE_X1 = 8, | ||
126 | INV_UN_0_SHUN_0_OPCODE_X1 = 8, | ||
127 | IRET_UN_0_SHUN_0_OPCODE_X1 = 9, | ||
128 | JALB_OPCODE_X1 = 13, | ||
129 | JALF_OPCODE_X1 = 12, | ||
130 | JALRP_SPECIAL_0_OPCODE_X1 = 9, | ||
131 | JALRR_IMM_1_OPCODE_SN = 3, | ||
132 | JALR_RR_IMM_0_OPCODE_SN = 5, | ||
133 | JALR_SPECIAL_0_OPCODE_X1 = 10, | ||
134 | JB_OPCODE_X1 = 11, | ||
135 | JF_OPCODE_X1 = 10, | ||
136 | JRP_SPECIAL_0_OPCODE_X1 = 11, | ||
137 | JRR_IMM_1_OPCODE_SN = 2, | ||
138 | JR_RR_IMM_0_OPCODE_SN = 4, | ||
139 | JR_SPECIAL_0_OPCODE_X1 = 12, | ||
140 | LBADD_IMM_0_OPCODE_X1 = 22, | ||
141 | LBADD_U_IMM_0_OPCODE_X1 = 23, | ||
142 | LB_OPCODE_Y2 = 0, | ||
143 | LB_UN_0_SHUN_0_OPCODE_X1 = 10, | ||
144 | LB_U_OPCODE_Y2 = 1, | ||
145 | LB_U_UN_0_SHUN_0_OPCODE_X1 = 11, | ||
146 | LHADD_IMM_0_OPCODE_X1 = 24, | ||
147 | LHADD_U_IMM_0_OPCODE_X1 = 25, | ||
148 | LH_OPCODE_Y2 = 2, | ||
149 | LH_UN_0_SHUN_0_OPCODE_X1 = 12, | ||
150 | LH_U_OPCODE_Y2 = 3, | ||
151 | LH_U_UN_0_SHUN_0_OPCODE_X1 = 13, | ||
152 | LNK_SPECIAL_0_OPCODE_X1 = 13, | ||
153 | LWADD_IMM_0_OPCODE_X1 = 26, | ||
154 | LWADD_NA_IMM_0_OPCODE_X1 = 27, | ||
155 | LW_NA_UN_0_SHUN_0_OPCODE_X1 = 24, | ||
156 | LW_OPCODE_Y2 = 4, | ||
157 | LW_UN_0_SHUN_0_OPCODE_X1 = 14, | ||
158 | MAXB_U_SPECIAL_0_OPCODE_X0 = 15, | ||
159 | MAXB_U_SPECIAL_0_OPCODE_X1 = 14, | ||
160 | MAXH_SPECIAL_0_OPCODE_X0 = 16, | ||
161 | MAXH_SPECIAL_0_OPCODE_X1 = 15, | ||
162 | MAXIB_U_IMM_0_OPCODE_X0 = 4, | ||
163 | MAXIB_U_IMM_0_OPCODE_X1 = 5, | ||
164 | MAXIH_IMM_0_OPCODE_X0 = 5, | ||
165 | MAXIH_IMM_0_OPCODE_X1 = 6, | ||
166 | MFSPR_IMM_0_OPCODE_X1 = 7, | ||
167 | MF_UN_0_SHUN_0_OPCODE_X1 = 15, | ||
168 | MINB_U_SPECIAL_0_OPCODE_X0 = 17, | ||
169 | MINB_U_SPECIAL_0_OPCODE_X1 = 16, | ||
170 | MINH_SPECIAL_0_OPCODE_X0 = 18, | ||
171 | MINH_SPECIAL_0_OPCODE_X1 = 17, | ||
172 | MINIB_U_IMM_0_OPCODE_X0 = 6, | ||
173 | MINIB_U_IMM_0_OPCODE_X1 = 8, | ||
174 | MINIH_IMM_0_OPCODE_X0 = 7, | ||
175 | MINIH_IMM_0_OPCODE_X1 = 9, | ||
176 | MM_OPCODE_X0 = 6, | ||
177 | MM_OPCODE_X1 = 7, | ||
178 | MNZB_SPECIAL_0_OPCODE_X0 = 19, | ||
179 | MNZB_SPECIAL_0_OPCODE_X1 = 18, | ||
180 | MNZH_SPECIAL_0_OPCODE_X0 = 20, | ||
181 | MNZH_SPECIAL_0_OPCODE_X1 = 19, | ||
182 | MNZ_SPECIAL_0_OPCODE_X0 = 21, | ||
183 | MNZ_SPECIAL_0_OPCODE_X1 = 20, | ||
184 | MNZ_SPECIAL_1_OPCODE_Y0 = 0, | ||
185 | MNZ_SPECIAL_1_OPCODE_Y1 = 1, | ||
186 | MOVEI_IMM_1_OPCODE_SN = 0, | ||
187 | MOVE_RR_IMM_0_OPCODE_SN = 8, | ||
188 | MTSPR_IMM_0_OPCODE_X1 = 10, | ||
189 | MULHHA_SS_SPECIAL_0_OPCODE_X0 = 22, | ||
190 | MULHHA_SS_SPECIAL_7_OPCODE_Y0 = 0, | ||
191 | MULHHA_SU_SPECIAL_0_OPCODE_X0 = 23, | ||
192 | MULHHA_UU_SPECIAL_0_OPCODE_X0 = 24, | ||
193 | MULHHA_UU_SPECIAL_7_OPCODE_Y0 = 1, | ||
194 | MULHHSA_UU_SPECIAL_0_OPCODE_X0 = 25, | ||
195 | MULHH_SS_SPECIAL_0_OPCODE_X0 = 26, | ||
196 | MULHH_SS_SPECIAL_6_OPCODE_Y0 = 0, | ||
197 | MULHH_SU_SPECIAL_0_OPCODE_X0 = 27, | ||
198 | MULHH_UU_SPECIAL_0_OPCODE_X0 = 28, | ||
199 | MULHH_UU_SPECIAL_6_OPCODE_Y0 = 1, | ||
200 | MULHLA_SS_SPECIAL_0_OPCODE_X0 = 29, | ||
201 | MULHLA_SU_SPECIAL_0_OPCODE_X0 = 30, | ||
202 | MULHLA_US_SPECIAL_0_OPCODE_X0 = 31, | ||
203 | MULHLA_UU_SPECIAL_0_OPCODE_X0 = 32, | ||
204 | MULHLSA_UU_SPECIAL_0_OPCODE_X0 = 33, | ||
205 | MULHLSA_UU_SPECIAL_5_OPCODE_Y0 = 0, | ||
206 | MULHL_SS_SPECIAL_0_OPCODE_X0 = 34, | ||
207 | MULHL_SU_SPECIAL_0_OPCODE_X0 = 35, | ||
208 | MULHL_US_SPECIAL_0_OPCODE_X0 = 36, | ||
209 | MULHL_UU_SPECIAL_0_OPCODE_X0 = 37, | ||
210 | MULLLA_SS_SPECIAL_0_OPCODE_X0 = 38, | ||
211 | MULLLA_SS_SPECIAL_7_OPCODE_Y0 = 2, | ||
212 | MULLLA_SU_SPECIAL_0_OPCODE_X0 = 39, | ||
213 | MULLLA_UU_SPECIAL_0_OPCODE_X0 = 40, | ||
214 | MULLLA_UU_SPECIAL_7_OPCODE_Y0 = 3, | ||
215 | MULLLSA_UU_SPECIAL_0_OPCODE_X0 = 41, | ||
216 | MULLL_SS_SPECIAL_0_OPCODE_X0 = 42, | ||
217 | MULLL_SS_SPECIAL_6_OPCODE_Y0 = 2, | ||
218 | MULLL_SU_SPECIAL_0_OPCODE_X0 = 43, | ||
219 | MULLL_UU_SPECIAL_0_OPCODE_X0 = 44, | ||
220 | MULLL_UU_SPECIAL_6_OPCODE_Y0 = 3, | ||
221 | MVNZ_SPECIAL_0_OPCODE_X0 = 45, | ||
222 | MVNZ_SPECIAL_1_OPCODE_Y0 = 1, | ||
223 | MVZ_SPECIAL_0_OPCODE_X0 = 46, | ||
224 | MVZ_SPECIAL_1_OPCODE_Y0 = 2, | ||
225 | MZB_SPECIAL_0_OPCODE_X0 = 47, | ||
226 | MZB_SPECIAL_0_OPCODE_X1 = 21, | ||
227 | MZH_SPECIAL_0_OPCODE_X0 = 48, | ||
228 | MZH_SPECIAL_0_OPCODE_X1 = 22, | ||
229 | MZ_SPECIAL_0_OPCODE_X0 = 49, | ||
230 | MZ_SPECIAL_0_OPCODE_X1 = 23, | ||
231 | MZ_SPECIAL_1_OPCODE_Y0 = 3, | ||
232 | MZ_SPECIAL_1_OPCODE_Y1 = 2, | ||
233 | NAP_UN_0_SHUN_0_OPCODE_X1 = 16, | ||
234 | NOP_NOREG_RR_IMM_0_OPCODE_SN = 2, | ||
235 | NOP_UN_0_SHUN_0_OPCODE_X0 = 6, | ||
236 | NOP_UN_0_SHUN_0_OPCODE_X1 = 17, | ||
237 | NOP_UN_0_SHUN_0_OPCODE_Y0 = 6, | ||
238 | NOP_UN_0_SHUN_0_OPCODE_Y1 = 3, | ||
239 | NOREG_RR_IMM_0_OPCODE_SN = 0, | ||
240 | NOR_SPECIAL_0_OPCODE_X0 = 50, | ||
241 | NOR_SPECIAL_0_OPCODE_X1 = 24, | ||
242 | NOR_SPECIAL_2_OPCODE_Y0 = 1, | ||
243 | NOR_SPECIAL_2_OPCODE_Y1 = 1, | ||
244 | ORI_IMM_0_OPCODE_X0 = 8, | ||
245 | ORI_IMM_0_OPCODE_X1 = 11, | ||
246 | ORI_OPCODE_Y0 = 11, | ||
247 | ORI_OPCODE_Y1 = 9, | ||
248 | OR_SPECIAL_0_OPCODE_X0 = 51, | ||
249 | OR_SPECIAL_0_OPCODE_X1 = 25, | ||
250 | OR_SPECIAL_2_OPCODE_Y0 = 2, | ||
251 | OR_SPECIAL_2_OPCODE_Y1 = 2, | ||
252 | PACKBS_U_SPECIAL_0_OPCODE_X0 = 103, | ||
253 | PACKBS_U_SPECIAL_0_OPCODE_X1 = 73, | ||
254 | PACKHB_SPECIAL_0_OPCODE_X0 = 52, | ||
255 | PACKHB_SPECIAL_0_OPCODE_X1 = 26, | ||
256 | PACKHS_SPECIAL_0_OPCODE_X0 = 102, | ||
257 | PACKHS_SPECIAL_0_OPCODE_X1 = 72, | ||
258 | PACKLB_SPECIAL_0_OPCODE_X0 = 53, | ||
259 | PACKLB_SPECIAL_0_OPCODE_X1 = 27, | ||
260 | PCNT_UN_0_SHUN_0_OPCODE_X0 = 7, | ||
261 | PCNT_UN_0_SHUN_0_OPCODE_Y0 = 7, | ||
262 | RLI_SHUN_0_OPCODE_X0 = 1, | ||
263 | RLI_SHUN_0_OPCODE_X1 = 1, | ||
264 | RLI_SHUN_0_OPCODE_Y0 = 1, | ||
265 | RLI_SHUN_0_OPCODE_Y1 = 1, | ||
266 | RL_SPECIAL_0_OPCODE_X0 = 54, | ||
267 | RL_SPECIAL_0_OPCODE_X1 = 28, | ||
268 | RL_SPECIAL_3_OPCODE_Y0 = 0, | ||
269 | RL_SPECIAL_3_OPCODE_Y1 = 0, | ||
270 | RR_IMM_0_OPCODE_SN = 0, | ||
271 | S1A_SPECIAL_0_OPCODE_X0 = 55, | ||
272 | S1A_SPECIAL_0_OPCODE_X1 = 29, | ||
273 | S1A_SPECIAL_0_OPCODE_Y0 = 1, | ||
274 | S1A_SPECIAL_0_OPCODE_Y1 = 1, | ||
275 | S2A_SPECIAL_0_OPCODE_X0 = 56, | ||
276 | S2A_SPECIAL_0_OPCODE_X1 = 30, | ||
277 | S2A_SPECIAL_0_OPCODE_Y0 = 2, | ||
278 | S2A_SPECIAL_0_OPCODE_Y1 = 2, | ||
279 | S3A_SPECIAL_0_OPCODE_X0 = 57, | ||
280 | S3A_SPECIAL_0_OPCODE_X1 = 31, | ||
281 | S3A_SPECIAL_5_OPCODE_Y0 = 1, | ||
282 | S3A_SPECIAL_5_OPCODE_Y1 = 1, | ||
283 | SADAB_U_SPECIAL_0_OPCODE_X0 = 58, | ||
284 | SADAH_SPECIAL_0_OPCODE_X0 = 59, | ||
285 | SADAH_U_SPECIAL_0_OPCODE_X0 = 60, | ||
286 | SADB_U_SPECIAL_0_OPCODE_X0 = 61, | ||
287 | SADH_SPECIAL_0_OPCODE_X0 = 62, | ||
288 | SADH_U_SPECIAL_0_OPCODE_X0 = 63, | ||
289 | SBADD_IMM_0_OPCODE_X1 = 28, | ||
290 | SB_OPCODE_Y2 = 5, | ||
291 | SB_SPECIAL_0_OPCODE_X1 = 32, | ||
292 | SEQB_SPECIAL_0_OPCODE_X0 = 64, | ||
293 | SEQB_SPECIAL_0_OPCODE_X1 = 33, | ||
294 | SEQH_SPECIAL_0_OPCODE_X0 = 65, | ||
295 | SEQH_SPECIAL_0_OPCODE_X1 = 34, | ||
296 | SEQIB_IMM_0_OPCODE_X0 = 9, | ||
297 | SEQIB_IMM_0_OPCODE_X1 = 12, | ||
298 | SEQIH_IMM_0_OPCODE_X0 = 10, | ||
299 | SEQIH_IMM_0_OPCODE_X1 = 13, | ||
300 | SEQI_IMM_0_OPCODE_X0 = 11, | ||
301 | SEQI_IMM_0_OPCODE_X1 = 14, | ||
302 | SEQI_OPCODE_Y0 = 12, | ||
303 | SEQI_OPCODE_Y1 = 10, | ||
304 | SEQ_SPECIAL_0_OPCODE_X0 = 66, | ||
305 | SEQ_SPECIAL_0_OPCODE_X1 = 35, | ||
306 | SEQ_SPECIAL_5_OPCODE_Y0 = 2, | ||
307 | SEQ_SPECIAL_5_OPCODE_Y1 = 2, | ||
308 | SHADD_IMM_0_OPCODE_X1 = 29, | ||
309 | SHL8II_IMM_0_OPCODE_SN = 3, | ||
310 | SHLB_SPECIAL_0_OPCODE_X0 = 67, | ||
311 | SHLB_SPECIAL_0_OPCODE_X1 = 36, | ||
312 | SHLH_SPECIAL_0_OPCODE_X0 = 68, | ||
313 | SHLH_SPECIAL_0_OPCODE_X1 = 37, | ||
314 | SHLIB_SHUN_0_OPCODE_X0 = 2, | ||
315 | SHLIB_SHUN_0_OPCODE_X1 = 2, | ||
316 | SHLIH_SHUN_0_OPCODE_X0 = 3, | ||
317 | SHLIH_SHUN_0_OPCODE_X1 = 3, | ||
318 | SHLI_SHUN_0_OPCODE_X0 = 4, | ||
319 | SHLI_SHUN_0_OPCODE_X1 = 4, | ||
320 | SHLI_SHUN_0_OPCODE_Y0 = 2, | ||
321 | SHLI_SHUN_0_OPCODE_Y1 = 2, | ||
322 | SHL_SPECIAL_0_OPCODE_X0 = 69, | ||
323 | SHL_SPECIAL_0_OPCODE_X1 = 38, | ||
324 | SHL_SPECIAL_3_OPCODE_Y0 = 1, | ||
325 | SHL_SPECIAL_3_OPCODE_Y1 = 1, | ||
326 | SHR1_RR_IMM_0_OPCODE_SN = 9, | ||
327 | SHRB_SPECIAL_0_OPCODE_X0 = 70, | ||
328 | SHRB_SPECIAL_0_OPCODE_X1 = 39, | ||
329 | SHRH_SPECIAL_0_OPCODE_X0 = 71, | ||
330 | SHRH_SPECIAL_0_OPCODE_X1 = 40, | ||
331 | SHRIB_SHUN_0_OPCODE_X0 = 5, | ||
332 | SHRIB_SHUN_0_OPCODE_X1 = 5, | ||
333 | SHRIH_SHUN_0_OPCODE_X0 = 6, | ||
334 | SHRIH_SHUN_0_OPCODE_X1 = 6, | ||
335 | SHRI_SHUN_0_OPCODE_X0 = 7, | ||
336 | SHRI_SHUN_0_OPCODE_X1 = 7, | ||
337 | SHRI_SHUN_0_OPCODE_Y0 = 3, | ||
338 | SHRI_SHUN_0_OPCODE_Y1 = 3, | ||
339 | SHR_SPECIAL_0_OPCODE_X0 = 72, | ||
340 | SHR_SPECIAL_0_OPCODE_X1 = 41, | ||
341 | SHR_SPECIAL_3_OPCODE_Y0 = 2, | ||
342 | SHR_SPECIAL_3_OPCODE_Y1 = 2, | ||
343 | SHUN_0_OPCODE_X0 = 7, | ||
344 | SHUN_0_OPCODE_X1 = 8, | ||
345 | SHUN_0_OPCODE_Y0 = 13, | ||
346 | SHUN_0_OPCODE_Y1 = 11, | ||
347 | SH_OPCODE_Y2 = 6, | ||
348 | SH_SPECIAL_0_OPCODE_X1 = 42, | ||
349 | SLTB_SPECIAL_0_OPCODE_X0 = 73, | ||
350 | SLTB_SPECIAL_0_OPCODE_X1 = 43, | ||
351 | SLTB_U_SPECIAL_0_OPCODE_X0 = 74, | ||
352 | SLTB_U_SPECIAL_0_OPCODE_X1 = 44, | ||
353 | SLTEB_SPECIAL_0_OPCODE_X0 = 75, | ||
354 | SLTEB_SPECIAL_0_OPCODE_X1 = 45, | ||
355 | SLTEB_U_SPECIAL_0_OPCODE_X0 = 76, | ||
356 | SLTEB_U_SPECIAL_0_OPCODE_X1 = 46, | ||
357 | SLTEH_SPECIAL_0_OPCODE_X0 = 77, | ||
358 | SLTEH_SPECIAL_0_OPCODE_X1 = 47, | ||
359 | SLTEH_U_SPECIAL_0_OPCODE_X0 = 78, | ||
360 | SLTEH_U_SPECIAL_0_OPCODE_X1 = 48, | ||
361 | SLTE_SPECIAL_0_OPCODE_X0 = 79, | ||
362 | SLTE_SPECIAL_0_OPCODE_X1 = 49, | ||
363 | SLTE_SPECIAL_4_OPCODE_Y0 = 0, | ||
364 | SLTE_SPECIAL_4_OPCODE_Y1 = 0, | ||
365 | SLTE_U_SPECIAL_0_OPCODE_X0 = 80, | ||
366 | SLTE_U_SPECIAL_0_OPCODE_X1 = 50, | ||
367 | SLTE_U_SPECIAL_4_OPCODE_Y0 = 1, | ||
368 | SLTE_U_SPECIAL_4_OPCODE_Y1 = 1, | ||
369 | SLTH_SPECIAL_0_OPCODE_X0 = 81, | ||
370 | SLTH_SPECIAL_0_OPCODE_X1 = 51, | ||
371 | SLTH_U_SPECIAL_0_OPCODE_X0 = 82, | ||
372 | SLTH_U_SPECIAL_0_OPCODE_X1 = 52, | ||
373 | SLTIB_IMM_0_OPCODE_X0 = 12, | ||
374 | SLTIB_IMM_0_OPCODE_X1 = 15, | ||
375 | SLTIB_U_IMM_0_OPCODE_X0 = 13, | ||
376 | SLTIB_U_IMM_0_OPCODE_X1 = 16, | ||
377 | SLTIH_IMM_0_OPCODE_X0 = 14, | ||
378 | SLTIH_IMM_0_OPCODE_X1 = 17, | ||
379 | SLTIH_U_IMM_0_OPCODE_X0 = 15, | ||
380 | SLTIH_U_IMM_0_OPCODE_X1 = 18, | ||
381 | SLTI_IMM_0_OPCODE_X0 = 16, | ||
382 | SLTI_IMM_0_OPCODE_X1 = 19, | ||
383 | SLTI_OPCODE_Y0 = 14, | ||
384 | SLTI_OPCODE_Y1 = 12, | ||
385 | SLTI_U_IMM_0_OPCODE_X0 = 17, | ||
386 | SLTI_U_IMM_0_OPCODE_X1 = 20, | ||
387 | SLTI_U_OPCODE_Y0 = 15, | ||
388 | SLTI_U_OPCODE_Y1 = 13, | ||
389 | SLT_SPECIAL_0_OPCODE_X0 = 83, | ||
390 | SLT_SPECIAL_0_OPCODE_X1 = 53, | ||
391 | SLT_SPECIAL_4_OPCODE_Y0 = 2, | ||
392 | SLT_SPECIAL_4_OPCODE_Y1 = 2, | ||
393 | SLT_U_SPECIAL_0_OPCODE_X0 = 84, | ||
394 | SLT_U_SPECIAL_0_OPCODE_X1 = 54, | ||
395 | SLT_U_SPECIAL_4_OPCODE_Y0 = 3, | ||
396 | SLT_U_SPECIAL_4_OPCODE_Y1 = 3, | ||
397 | SNEB_SPECIAL_0_OPCODE_X0 = 85, | ||
398 | SNEB_SPECIAL_0_OPCODE_X1 = 55, | ||
399 | SNEH_SPECIAL_0_OPCODE_X0 = 86, | ||
400 | SNEH_SPECIAL_0_OPCODE_X1 = 56, | ||
401 | SNE_SPECIAL_0_OPCODE_X0 = 87, | ||
402 | SNE_SPECIAL_0_OPCODE_X1 = 57, | ||
403 | SNE_SPECIAL_5_OPCODE_Y0 = 3, | ||
404 | SNE_SPECIAL_5_OPCODE_Y1 = 3, | ||
405 | SPECIAL_0_OPCODE_X0 = 0, | ||
406 | SPECIAL_0_OPCODE_X1 = 1, | ||
407 | SPECIAL_0_OPCODE_Y0 = 1, | ||
408 | SPECIAL_0_OPCODE_Y1 = 1, | ||
409 | SPECIAL_1_OPCODE_Y0 = 2, | ||
410 | SPECIAL_1_OPCODE_Y1 = 2, | ||
411 | SPECIAL_2_OPCODE_Y0 = 3, | ||
412 | SPECIAL_2_OPCODE_Y1 = 3, | ||
413 | SPECIAL_3_OPCODE_Y0 = 4, | ||
414 | SPECIAL_3_OPCODE_Y1 = 4, | ||
415 | SPECIAL_4_OPCODE_Y0 = 5, | ||
416 | SPECIAL_4_OPCODE_Y1 = 5, | ||
417 | SPECIAL_5_OPCODE_Y0 = 6, | ||
418 | SPECIAL_5_OPCODE_Y1 = 6, | ||
419 | SPECIAL_6_OPCODE_Y0 = 7, | ||
420 | SPECIAL_7_OPCODE_Y0 = 8, | ||
421 | SRAB_SPECIAL_0_OPCODE_X0 = 88, | ||
422 | SRAB_SPECIAL_0_OPCODE_X1 = 58, | ||
423 | SRAH_SPECIAL_0_OPCODE_X0 = 89, | ||
424 | SRAH_SPECIAL_0_OPCODE_X1 = 59, | ||
425 | SRAIB_SHUN_0_OPCODE_X0 = 8, | ||
426 | SRAIB_SHUN_0_OPCODE_X1 = 8, | ||
427 | SRAIH_SHUN_0_OPCODE_X0 = 9, | ||
428 | SRAIH_SHUN_0_OPCODE_X1 = 9, | ||
429 | SRAI_SHUN_0_OPCODE_X0 = 10, | ||
430 | SRAI_SHUN_0_OPCODE_X1 = 10, | ||
431 | SRAI_SHUN_0_OPCODE_Y0 = 4, | ||
432 | SRAI_SHUN_0_OPCODE_Y1 = 4, | ||
433 | SRA_SPECIAL_0_OPCODE_X0 = 90, | ||
434 | SRA_SPECIAL_0_OPCODE_X1 = 60, | ||
435 | SRA_SPECIAL_3_OPCODE_Y0 = 3, | ||
436 | SRA_SPECIAL_3_OPCODE_Y1 = 3, | ||
437 | SUBBS_U_SPECIAL_0_OPCODE_X0 = 100, | ||
438 | SUBBS_U_SPECIAL_0_OPCODE_X1 = 70, | ||
439 | SUBB_SPECIAL_0_OPCODE_X0 = 91, | ||
440 | SUBB_SPECIAL_0_OPCODE_X1 = 61, | ||
441 | SUBHS_SPECIAL_0_OPCODE_X0 = 101, | ||
442 | SUBHS_SPECIAL_0_OPCODE_X1 = 71, | ||
443 | SUBH_SPECIAL_0_OPCODE_X0 = 92, | ||
444 | SUBH_SPECIAL_0_OPCODE_X1 = 62, | ||
445 | SUBS_SPECIAL_0_OPCODE_X0 = 97, | ||
446 | SUBS_SPECIAL_0_OPCODE_X1 = 67, | ||
447 | SUB_SPECIAL_0_OPCODE_X0 = 93, | ||
448 | SUB_SPECIAL_0_OPCODE_X1 = 63, | ||
449 | SUB_SPECIAL_0_OPCODE_Y0 = 3, | ||
450 | SUB_SPECIAL_0_OPCODE_Y1 = 3, | ||
451 | SWADD_IMM_0_OPCODE_X1 = 30, | ||
452 | SWINT0_UN_0_SHUN_0_OPCODE_X1 = 18, | ||
453 | SWINT1_UN_0_SHUN_0_OPCODE_X1 = 19, | ||
454 | SWINT2_UN_0_SHUN_0_OPCODE_X1 = 20, | ||
455 | SWINT3_UN_0_SHUN_0_OPCODE_X1 = 21, | ||
456 | SW_OPCODE_Y2 = 7, | ||
457 | SW_SPECIAL_0_OPCODE_X1 = 64, | ||
458 | TBLIDXB0_UN_0_SHUN_0_OPCODE_X0 = 8, | ||
459 | TBLIDXB0_UN_0_SHUN_0_OPCODE_Y0 = 8, | ||
460 | TBLIDXB1_UN_0_SHUN_0_OPCODE_X0 = 9, | ||
461 | TBLIDXB1_UN_0_SHUN_0_OPCODE_Y0 = 9, | ||
462 | TBLIDXB2_UN_0_SHUN_0_OPCODE_X0 = 10, | ||
463 | TBLIDXB2_UN_0_SHUN_0_OPCODE_Y0 = 10, | ||
464 | TBLIDXB3_UN_0_SHUN_0_OPCODE_X0 = 11, | ||
465 | TBLIDXB3_UN_0_SHUN_0_OPCODE_Y0 = 11, | ||
466 | TNS_UN_0_SHUN_0_OPCODE_X1 = 22, | ||
467 | UN_0_SHUN_0_OPCODE_X0 = 11, | ||
468 | UN_0_SHUN_0_OPCODE_X1 = 11, | ||
469 | UN_0_SHUN_0_OPCODE_Y0 = 5, | ||
470 | UN_0_SHUN_0_OPCODE_Y1 = 5, | ||
471 | WH64_UN_0_SHUN_0_OPCODE_X1 = 23, | ||
472 | XORI_IMM_0_OPCODE_X0 = 2, | ||
473 | XORI_IMM_0_OPCODE_X1 = 21, | ||
474 | XOR_SPECIAL_0_OPCODE_X0 = 94, | ||
475 | XOR_SPECIAL_0_OPCODE_X1 = 65, | ||
476 | XOR_SPECIAL_2_OPCODE_Y0 = 3, | ||
477 | XOR_SPECIAL_2_OPCODE_Y1 = 3 | ||
478 | }; | ||
479 | |||
480 | #endif /* !_TILE_OPCODE_CONSTANTS_H */ | ||
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h new file mode 100644 index 000000000000..c8301c43d6d9 --- /dev/null +++ b/arch/tile/include/asm/page.h | |||
@@ -0,0 +1,334 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PAGE_H | ||
16 | #define _ASM_TILE_PAGE_H | ||
17 | |||
18 | #include <linux/const.h> | ||
19 | #include <hv/hypervisor.h> | ||
20 | #include <arch/chip.h> | ||
21 | |||
22 | /* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */ | ||
23 | #define PAGE_SHIFT 16 | ||
24 | #define HPAGE_SHIFT 24 | ||
25 | |||
26 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) | ||
27 | #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) | ||
28 | |||
29 | #define PAGE_MASK (~(PAGE_SIZE - 1)) | ||
30 | #define HPAGE_MASK (~(HPAGE_SIZE - 1)) | ||
31 | |||
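As a quick sanity check of the constants above: PAGE_SHIFT of 16 gives 64 KB pages and HPAGE_SHIFT of 24 gives 16 MB huge pages, and the masks round an address down to the corresponding boundary. A minimal stand-alone sketch (ordinary user-space C, not kernel code) reproduces the arithmetic:

    #include <stdio.h>

    #define PAGE_SHIFT  16
    #define HPAGE_SHIFT 24
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)      /* 65536 bytes */
    #define HPAGE_SIZE  (1UL << HPAGE_SHIFT)     /* 16777216 bytes */
    #define PAGE_MASK   (~(PAGE_SIZE - 1))
    #define HPAGE_MASK  (~(HPAGE_SIZE - 1))

    int main(void)
    {
            unsigned long addr = 0xc1234567UL;
            printf("page start:  0x%lx\n", addr & PAGE_MASK);   /* 0xc1230000 */
            printf("hpage start: 0x%lx\n", addr & HPAGE_MASK);  /* 0xc1000000 */
            return 0;
    }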
32 | /* | ||
33 | * The {,H}PAGE_SHIFT values must match the HV_LOG2_PAGE_SIZE_xxx | ||
34 | * definitions in <hv/hypervisor.h>. We validate this at build time | ||
35 | * here, and again at runtime during early boot. We provide a | ||
36 | * separate definition since userspace doesn't have <hv/hypervisor.h>. | ||
37 | * | ||
38 | * Be careful to distinguish PAGE_SHIFT from HV_PTE_INDEX_PFN, since | ||
39 | * they are the same on i386 but not TILE. | ||
40 | */ | ||
41 | #if HV_LOG2_PAGE_SIZE_SMALL != PAGE_SHIFT | ||
42 | # error Small page size mismatch in Linux | ||
43 | #endif | ||
44 | #if HV_LOG2_PAGE_SIZE_LARGE != HPAGE_SHIFT | ||
45 | # error Huge page size mismatch in Linux | ||
46 | #endif | ||
47 | |||
48 | #ifndef __ASSEMBLY__ | ||
49 | |||
50 | #include <linux/types.h> | ||
51 | #include <linux/string.h> | ||
52 | |||
53 | struct page; | ||
54 | |||
55 | static inline void clear_page(void *page) | ||
56 | { | ||
57 | memset(page, 0, PAGE_SIZE); | ||
58 | } | ||
59 | |||
60 | static inline void copy_page(void *to, void *from) | ||
61 | { | ||
62 | memcpy(to, from, PAGE_SIZE); | ||
63 | } | ||
64 | |||
65 | static inline void clear_user_page(void *page, unsigned long vaddr, | ||
66 | struct page *pg) | ||
67 | { | ||
68 | clear_page(page); | ||
69 | } | ||
70 | |||
71 | static inline void copy_user_page(void *to, void *from, unsigned long vaddr, | ||
72 | struct page *topage) | ||
73 | { | ||
74 | copy_page(to, from); | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * All levels of the hypervisor page tables use the same basic HV_PTE structure. | ||
79 | */ | ||
80 | |||
81 | typedef __u64 pteval_t; | ||
82 | typedef __u64 pmdval_t; | ||
83 | typedef __u64 pudval_t; | ||
84 | typedef __u64 pgdval_t; | ||
85 | typedef __u64 pgprotval_t; | ||
86 | |||
87 | typedef HV_PTE pte_t; | ||
88 | typedef HV_PTE pgd_t; | ||
89 | typedef HV_PTE pgprot_t; | ||
90 | |||
91 | /* | ||
92 | * User L2 page tables are managed as one L2 page table per page, | ||
93 | * because we use the page allocator for them. This keeps the allocation | ||
94 | * simple and makes it potentially useful to implement HIGHPTE at some point. | ||
95 | * However, it's also inefficient, since L2 page tables are much smaller | ||
96 | * than pages (currently 2KB vs 64KB). So we should revisit this. | ||
97 | */ | ||
98 | typedef struct page *pgtable_t; | ||
99 | |||
100 | /* Must be a macro since it is used to create constants. */ | ||
101 | #define __pgprot(val) hv_pte(val) | ||
102 | |||
103 | static inline u64 pgprot_val(pgprot_t pgprot) | ||
104 | { | ||
105 | return hv_pte_val(pgprot); | ||
106 | } | ||
107 | |||
108 | static inline u64 pte_val(pte_t pte) | ||
109 | { | ||
110 | return hv_pte_val(pte); | ||
111 | } | ||
112 | |||
113 | static inline u64 pgd_val(pgd_t pgd) | ||
114 | { | ||
115 | return hv_pte_val(pgd); | ||
116 | } | ||
117 | |||
118 | #ifdef __tilegx__ | ||
119 | |||
120 | typedef HV_PTE pmd_t; | ||
121 | |||
122 | static inline u64 pmd_val(pmd_t pmd) | ||
123 | { | ||
124 | return hv_pte_val(pmd); | ||
125 | } | ||
126 | |||
127 | #endif | ||
128 | |||
129 | #endif /* !__ASSEMBLY__ */ | ||
130 | |||
131 | #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) | ||
132 | |||
133 | #define HUGE_MAX_HSTATE 2 | ||
134 | |||
135 | #ifdef CONFIG_HUGETLB_PAGE | ||
136 | #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA | ||
137 | #endif | ||
138 | |||
139 | /* Each memory controller has PAs distinct in their high bits. */ | ||
140 | #define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS()) | ||
141 | #define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS()) | ||
142 | #define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT) | ||
143 | #define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT)) | ||
144 | |||
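The two macros above pick out the high physical-address bits that select a memory controller; the shift amounts come from chip parameters in <arch/chip.h>. Purely for illustration, assuming a 36-bit physical address width and four memory shims (log2 = 2), the shift is 34 and the top two PA bits give the controller index:

    /* Illustrative values; the real ones come from CHIP_PA_WIDTH() and
     * CHIP_LOG_NUM_MSHIMS() in <arch/chip.h>. */
    enum { PA_WIDTH = 36, LOG_NUM_MSHIMS = 2, PAGE_SHIFT = 16 };
    enum { HIGHBIT_SHIFT = PA_WIDTH - LOG_NUM_MSHIMS };      /* 34 */

    static inline unsigned int pa_to_highbits(unsigned long long pa)
    {
            return (unsigned int)(pa >> HIGHBIT_SHIFT);       /* 0..3 */
    }

    static inline unsigned int pfn_to_highbits(unsigned long pfn)
    {
            return (unsigned int)(pfn >> (HIGHBIT_SHIFT - PAGE_SHIFT));
    }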
145 | #ifdef __tilegx__ | ||
146 | |||
147 | /* | ||
148 | * We reserve the lower half of memory for user-space programs, and the | ||
149 | * upper half for system code. We re-map all of physical memory in the | ||
150 | * upper half, which takes a quarter of our VA space. Then we have | ||
151 | * the vmalloc regions. The supervisor code lives at 0xfffffff700000000, | ||
152 | * with the hypervisor above that. | ||
153 | * | ||
154 | * Loadable kernel modules are placed immediately after the static | ||
155 | * supervisor code, with each being allocated a 256MB region of | ||
156 | * address space, so we don't have to worry about the range of "jal" | ||
157 | * and other branch instructions. | ||
158 | * | ||
159 | * For now we keep life simple and just allocate one pmd (4GB) for vmalloc. | ||
160 | * Similarly, for now we don't play any struct page mapping games. | ||
161 | */ | ||
162 | |||
163 | #if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH() | ||
164 | # error Too much PA to map with the VA available! | ||
165 | #endif | ||
166 | #define HALF_VA_SPACE (_AC(1, UL) << (CHIP_VA_WIDTH() - 1)) | ||
167 | |||
168 | #define MEM_LOW_END (HALF_VA_SPACE - 1) /* low half */ | ||
169 | #define MEM_HIGH_START (-HALF_VA_SPACE) /* high half */ | ||
170 | #define PAGE_OFFSET MEM_HIGH_START | ||
171 | #define _VMALLOC_START _AC(0xfffffff500000000, UL) /* 4 GB */ | ||
172 | #define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL) /* 4 GB */ | ||
173 | #define MEM_SV_START _AC(0xfffffff700000000, UL) /* 256 MB */ | ||
174 | #define MEM_SV_INTRPT MEM_SV_START | ||
175 | #define MEM_MODULE_START _AC(0xfffffff710000000, UL) /* 256 MB */ | ||
176 | #define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024)) | ||
177 | #define MEM_HV_START _AC(0xfffffff800000000, UL) /* 32 GB */ | ||
178 | |||
179 | /* Highest DTLB address we will use */ | ||
180 | #define KERNEL_HIGH_VADDR MEM_SV_START | ||
181 | |||
182 | /* Since we don't currently provide any fixmaps, we use an impossible VA. */ | ||
183 | #define FIXADDR_TOP MEM_HV_START | ||
184 | |||
185 | #else /* !__tilegx__ */ | ||
186 | |||
187 | /* | ||
188 | * A PAGE_OFFSET of 0xC0000000 means that the kernel has | ||
189 | * a virtual address space of one gigabyte, which limits the | ||
190 | * amount of physical memory you can use to about 768MB. | ||
191 | * If you want more physical memory than this then see the CONFIG_HIGHMEM | ||
192 | * option in the kernel configuration. | ||
193 | * | ||
194 | * The top two 16MB chunks in the table below (VIRT and HV) are | ||
195 | * unavailable to Linux. Since the kernel interrupt vectors must live | ||
196 | * at 0xfd000000, we map all of the bottom of RAM at this address with | ||
197 | * a huge page table entry to minimize its ITLB footprint (as well as | ||
198 | * at PAGE_OFFSET). The last architected requirement is that user | ||
199 | * interrupt vectors live at 0xfc000000, so we make that range of | ||
200 | * memory available to user processes. The remaining regions are sized | ||
201 | * as shown; after the first four addresses, we show "typical" values, | ||
202 | * since the actual addresses depend on kernel #defines. | ||
203 | * | ||
204 | * MEM_VIRT_INTRPT 0xff000000 | ||
205 | * MEM_HV_INTRPT 0xfe000000 | ||
206 | * MEM_SV_INTRPT (kernel code) 0xfd000000 | ||
207 | * MEM_USER_INTRPT (user vector) 0xfc000000 | ||
208 | * FIX_KMAP_xxx 0xf8000000 (via NR_CPUS * KM_TYPE_NR) | ||
209 | * PKMAP_BASE 0xf7000000 (via LAST_PKMAP) | ||
210 | * HUGE_VMAP 0xf3000000 (via CONFIG_NR_HUGE_VMAPS) | ||
211 | * VMALLOC_START 0xf0000000 (via __VMALLOC_RESERVE) | ||
212 | * mapped LOWMEM 0xc0000000 | ||
213 | */ | ||
214 | |||
215 | #define MEM_USER_INTRPT _AC(0xfc000000, UL) | ||
216 | #define MEM_SV_INTRPT _AC(0xfd000000, UL) | ||
217 | #define MEM_HV_INTRPT _AC(0xfe000000, UL) | ||
218 | #define MEM_VIRT_INTRPT _AC(0xff000000, UL) | ||
219 | |||
220 | #define INTRPT_SIZE 0x4000 | ||
221 | |||
222 | /* Tolerate page size larger than the architecture interrupt region size. */ | ||
223 | #if PAGE_SIZE > INTRPT_SIZE | ||
224 | #undef INTRPT_SIZE | ||
225 | #define INTRPT_SIZE PAGE_SIZE | ||
226 | #endif | ||
227 | |||
228 | #define KERNEL_HIGH_VADDR MEM_USER_INTRPT | ||
229 | #define FIXADDR_TOP (KERNEL_HIGH_VADDR - PAGE_SIZE) | ||
230 | |||
231 | #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) | ||
232 | |||
233 | /* On 32-bit architectures we mix kernel modules in with other vmaps. */ | ||
234 | #define MEM_MODULE_START VMALLOC_START | ||
235 | #define MEM_MODULE_END VMALLOC_END | ||
236 | |||
237 | #endif /* __tilegx__ */ | ||
238 | |||
239 | #ifndef __ASSEMBLY__ | ||
240 | |||
241 | #ifdef CONFIG_HIGHMEM | ||
242 | |||
243 | /* Map kernel virtual addresses to page frames, in HPAGE_SIZE chunks. */ | ||
244 | extern unsigned long pbase_map[]; | ||
245 | extern void *vbase_map[]; | ||
246 | |||
247 | static inline unsigned long kaddr_to_pfn(const volatile void *_kaddr) | ||
248 | { | ||
249 | unsigned long kaddr = (unsigned long)_kaddr; | ||
250 | return pbase_map[kaddr >> HPAGE_SHIFT] + | ||
251 | ((kaddr & (HPAGE_SIZE - 1)) >> PAGE_SHIFT); | ||
252 | } | ||
253 | |||
254 | static inline void *pfn_to_kaddr(unsigned long pfn) | ||
255 | { | ||
256 | return vbase_map[__pfn_to_highbits(pfn)] + (pfn << PAGE_SHIFT); | ||
257 | } | ||
258 | |||
259 | static inline phys_addr_t virt_to_phys(const volatile void *kaddr) | ||
260 | { | ||
261 | unsigned long pfn = kaddr_to_pfn(kaddr); | ||
262 | return ((phys_addr_t)pfn << PAGE_SHIFT) + | ||
263 | ((unsigned long)kaddr & (PAGE_SIZE-1)); | ||
264 | } | ||
265 | |||
266 | static inline void *phys_to_virt(phys_addr_t paddr) | ||
267 | { | ||
268 | return pfn_to_kaddr(paddr >> PAGE_SHIFT) + (paddr & (PAGE_SIZE-1)); | ||
269 | } | ||
270 | |||
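In the HIGHMEM configuration the kernel virtual-to-physical mapping above is table-driven rather than a fixed offset: the kernel address is split at HPAGE_SHIFT, pbase_map[] supplies the base PFN of that 16 MB chunk, and the remaining bits supply the page within the chunk. A hypothetical worked example (the pbase_map contents are invented purely for illustration):

    /*
     * Suppose pbase_map[0xfd] == 0x40000, i.e. the 16 MB chunk of kernel
     * VAs starting at 0xfd000000 maps physical pages starting at PFN
     * 0x40000.  Then for kaddr == 0xfd012345:
     *
     *   kaddr >> HPAGE_SHIFT                     == 0xfd
     *   (kaddr & (HPAGE_SIZE-1)) >> PAGE_SHIFT   == 0x1
     *   kaddr_to_pfn(kaddr)                      == 0x40000 + 0x1 == 0x40001
     *   virt_to_phys(kaddr)                      == (0x40001ULL << 16) + 0x2345
     */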
271 | /* With HIGHMEM, we pack PAGE_OFFSET through high_memory with all valid VAs. */ | ||
272 | static inline int virt_addr_valid(const volatile void *kaddr) | ||
273 | { | ||
274 | extern void *high_memory; /* copied from <linux/mm.h> */ | ||
275 | return ((unsigned long)kaddr >= PAGE_OFFSET && kaddr < high_memory); | ||
276 | } | ||
277 | |||
278 | #else /* !CONFIG_HIGHMEM */ | ||
279 | |||
280 | static inline unsigned long kaddr_to_pfn(const volatile void *kaddr) | ||
281 | { | ||
282 | return ((unsigned long)kaddr - PAGE_OFFSET) >> PAGE_SHIFT; | ||
283 | } | ||
284 | |||
285 | static inline void *pfn_to_kaddr(unsigned long pfn) | ||
286 | { | ||
287 | return (void *)((pfn << PAGE_SHIFT) + PAGE_OFFSET); | ||
288 | } | ||
289 | |||
290 | static inline phys_addr_t virt_to_phys(const volatile void *kaddr) | ||
291 | { | ||
292 | return (phys_addr_t)((unsigned long)kaddr - PAGE_OFFSET); | ||
293 | } | ||
294 | |||
295 | static inline void *phys_to_virt(phys_addr_t paddr) | ||
296 | { | ||
297 | return (void *)((unsigned long)paddr + PAGE_OFFSET); | ||
298 | } | ||
299 | |||
300 | /* Check that the given address is within some mapped range of PAs. */ | ||
301 | #define virt_addr_valid(kaddr) pfn_valid(kaddr_to_pfn(kaddr)) | ||
302 | |||
303 | #endif /* !CONFIG_HIGHMEM */ | ||
304 | |||
305 | /* Not all callers are consistent in how they call these functions. */ | ||
306 | #define __pa(kaddr) virt_to_phys((void *)(unsigned long)(kaddr)) | ||
307 | #define __va(paddr) phys_to_virt((phys_addr_t)(paddr)) | ||
308 | |||
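Without HIGHMEM the mapping really is a linear offset from PAGE_OFFSET, so __pa() and __va() reduce to a subtraction and an addition. A stand-alone sketch of the round trip (the PAGE_OFFSET value is illustrative; the kernel takes it from CONFIG_PAGE_OFFSET):

    #include <assert.h>

    #define KERNEL_PAGE_OFFSET 0xc0000000UL   /* illustrative value */

    static unsigned long to_phys(unsigned long kaddr)
    {
            return kaddr - KERNEL_PAGE_OFFSET;   /* what __pa() does here */
    }

    static unsigned long to_virt(unsigned long paddr)
    {
            return paddr + KERNEL_PAGE_OFFSET;   /* what __va() does here */
    }

    int main(void)
    {
            unsigned long kaddr = KERNEL_PAGE_OFFSET + 0x123456;
            assert(to_phys(kaddr) == 0x123456);
            assert(to_virt(to_phys(kaddr)) == kaddr);
            return 0;
    }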
309 | extern int devmem_is_allowed(unsigned long pagenr); | ||
310 | |||
311 | #ifdef CONFIG_FLATMEM | ||
312 | static inline int pfn_valid(unsigned long pfn) | ||
313 | { | ||
314 | return pfn < max_mapnr; | ||
315 | } | ||
316 | #endif | ||
317 | |||
318 | /* Provide as macros since these require some other headers included. */ | ||
319 | #define page_to_pa(page) ((phys_addr_t)(page_to_pfn(page)) << PAGE_SHIFT) | ||
320 | #define virt_to_page(kaddr) pfn_to_page(kaddr_to_pfn(kaddr)) | ||
321 | #define page_to_virt(page) pfn_to_kaddr(page_to_pfn(page)) | ||
322 | |||
323 | struct mm_struct; | ||
324 | extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); | ||
325 | |||
326 | #endif /* !__ASSEMBLY__ */ | ||
327 | |||
328 | #define VM_DATA_DEFAULT_FLAGS \ | ||
329 | (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | ||
330 | |||
331 | #include <asm-generic/memory_model.h> | ||
332 | #include <asm-generic/getorder.h> | ||
333 | |||
334 | #endif /* _ASM_TILE_PAGE_H */ | ||
diff --git a/arch/tile/include/asm/param.h b/arch/tile/include/asm/param.h new file mode 100644 index 000000000000..965d45427975 --- /dev/null +++ b/arch/tile/include/asm/param.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/param.h> | |||
diff --git a/arch/tile/include/asm/pci-bridge.h b/arch/tile/include/asm/pci-bridge.h new file mode 100644 index 000000000000..e853b0e2793b --- /dev/null +++ b/arch/tile/include/asm/pci-bridge.h | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PCI_BRIDGE_H | ||
16 | #define _ASM_TILE_PCI_BRIDGE_H | ||
17 | |||
18 | #include <linux/ioport.h> | ||
19 | #include <linux/pci.h> | ||
20 | |||
21 | struct device_node; | ||
22 | struct pci_controller; | ||
23 | |||
24 | /* | ||
25 | * pci_bus_io_base() returns the memory address at which you can access | ||
26 | * the I/O space for PCI bus number `bus' (or NULL on error). | ||
27 | */ | ||
28 | extern void __iomem *pci_bus_io_base(unsigned int bus); | ||
29 | extern unsigned long pci_bus_io_base_phys(unsigned int bus); | ||
30 | extern unsigned long pci_bus_mem_base_phys(unsigned int bus); | ||
31 | |||
32 | /* Allocate a new PCI host bridge structure */ | ||
33 | extern struct pci_controller *pcibios_alloc_controller(void); | ||
34 | |||
35 | /* Helper function for setting up resources */ | ||
36 | extern void pci_init_resource(struct resource *res, unsigned long start, | ||
37 | unsigned long end, int flags, char *name); | ||
38 | |||
39 | /* Get the PCI host controller for a bus */ | ||
40 | extern struct pci_controller *pci_bus_to_hose(int bus); | ||
41 | |||
42 | /* | ||
43 | * Structure of a PCI controller (host bridge) | ||
44 | */ | ||
45 | struct pci_controller { | ||
46 | int index; /* PCI domain number */ | ||
47 | struct pci_bus *root_bus; | ||
48 | |||
49 | int first_busno; | ||
50 | int last_busno; | ||
51 | |||
52 | int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */ | ||
53 | int hv_mem_fd; /* fd to Hypervisor for MMIO operations */ | ||
54 | |||
55 | struct pci_ops *ops; | ||
56 | |||
57 | int irq_base; /* Base IRQ from the Hypervisor */ | ||
58 | int plx_gen1; /* flag for PLX Gen 1 configuration */ | ||
59 | |||
60 | /* Address ranges that are routed to this controller/bridge. */ | ||
61 | struct resource mem_resources[3]; | ||
62 | }; | ||
63 | |||
64 | static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus) | ||
65 | { | ||
66 | return bus->sysdata; | ||
67 | } | ||
68 | |||
69 | extern void setup_indirect_pci_nomap(struct pci_controller *hose, | ||
70 | void __iomem *cfg_addr, void __iomem *cfg_data); | ||
71 | extern void setup_indirect_pci(struct pci_controller *hose, | ||
72 | u32 cfg_addr, u32 cfg_data); | ||
73 | extern void setup_grackle(struct pci_controller *hose); | ||
74 | |||
75 | extern unsigned char common_swizzle(struct pci_dev *, unsigned char *); | ||
76 | |||
77 | /* | ||
78 | * The following code swizzles for exactly one bridge. The routine | ||
79 | * common_swizzle, declared above, handles multiple bridges. But there | ||
80 | * are some boards that don't follow the PCI spec's suggestion, so we | ||
81 | * break this piece out separately. | ||
82 | */ | ||
83 | static inline unsigned char bridge_swizzle(unsigned char pin, | ||
84 | unsigned char idsel) | ||
85 | { | ||
86 | return (((pin-1) + idsel) % 4) + 1; | ||
87 | } | ||
88 | |||
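The swizzle implements the standard PCI-to-PCI bridge interrupt rotation: the pin seen upstream is the device's pin rotated by its slot/IDSEL number. A worked example:

    /* Pin INTB (2) on a device at idsel/slot 3:
     *   bridge_swizzle(2, 3) == (((2 - 1) + 3) % 4) + 1 == 1   (INTA upstream)
     */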
89 | /* | ||
90 | * The following macro is used to look up IRQs in a standard table | ||
91 | * format for those PPC systems that do not already have PCI | ||
92 | * interrupts properly routed. | ||
93 | */ | ||
94 | /* FIXME - double check this */ | ||
95 | #define PCI_IRQ_TABLE_LOOKUP ({ \ | ||
96 | long _ctl_ = -1; \ | ||
97 | if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \ | ||
98 | _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \ | ||
99 | _ctl_; \ | ||
100 | }) | ||
101 | |||
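PCI_IRQ_TABLE_LOOKUP expects idsel, pin, min_idsel, max_idsel, irqs_per_slot and pci_irq_table to already be in scope where it is expanded. A hedged sketch of how a board file might use it (the function name, table contents and IRQ numbers below are made up for illustration):

    static int board_map_irq(struct pci_dev *dev, unsigned char idsel,
                             unsigned char pin)
    {
            static const char pci_irq_table[][4] = {
                    /* INTA  INTB  INTC  INTD */
                    {  16,   17,   18,   19 },      /* idsel 1 */
                    {  20,   21,   22,   23 },      /* idsel 2 */
            };
            const long min_idsel = 1, max_idsel = 2, irqs_per_slot = 4;

            return PCI_IRQ_TABLE_LOOKUP;
    }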
102 | /* | ||
103 | * Scan the buses below a given PCI host bridge and assign suitable | ||
104 | * resources to all devices found. | ||
105 | */ | ||
106 | extern int pciauto_bus_scan(struct pci_controller *, int); | ||
107 | |||
108 | #ifdef CONFIG_PCI | ||
109 | extern unsigned long pci_address_to_pio(phys_addr_t address); | ||
110 | #else | ||
111 | static inline unsigned long pci_address_to_pio(phys_addr_t address) | ||
112 | { | ||
113 | return (unsigned long)-1; | ||
114 | } | ||
115 | #endif | ||
116 | |||
117 | #endif /* _ASM_TILE_PCI_BRIDGE_H */ | ||
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h new file mode 100644 index 000000000000..b0c15da2d5d5 --- /dev/null +++ b/arch/tile/include/asm/pci.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PCI_H | ||
16 | #define _ASM_TILE_PCI_H | ||
17 | |||
18 | #include <asm/pci-bridge.h> | ||
19 | |||
20 | /* | ||
21 | * The hypervisor maps the entirety of CPA-space as bus addresses, so | ||
22 | * bus addresses are physical addresses. The networking and block | ||
23 | * device layers use this boolean for bounce buffer decisions. | ||
24 | */ | ||
25 | #define PCI_DMA_BUS_IS_PHYS 1 | ||
26 | |||
27 | struct pci_controller *pci_bus_to_hose(int bus); | ||
28 | unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp); | ||
29 | int __init tile_pci_init(void); | ||
30 | void pci_iounmap(struct pci_dev *dev, void __iomem *addr); | ||
31 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); | ||
32 | void __devinit pcibios_fixup_bus(struct pci_bus *bus); | ||
33 | |||
34 | int __devinit _tile_cfg_read(struct pci_controller *hose, | ||
35 | int bus, | ||
36 | int slot, | ||
37 | int function, | ||
38 | int offset, | ||
39 | int size, | ||
40 | u32 *val); | ||
41 | int __devinit _tile_cfg_write(struct pci_controller *hose, | ||
42 | int bus, | ||
43 | int slot, | ||
44 | int function, | ||
45 | int offset, | ||
46 | int size, | ||
47 | u32 val); | ||
48 | |||
49 | /* | ||
50 | * These are used to do config reads and writes in the early stages of | ||
51 | * setup before the driver infrastructure has been set up enough to be | ||
52 | * able to do config reads and writes. | ||
53 | */ | ||
54 | #define early_cfg_read(where, size, value) \ | ||
55 | _tile_cfg_read(controller, \ | ||
56 | current_bus, \ | ||
57 | pci_slot, \ | ||
58 | pci_fn, \ | ||
59 | where, \ | ||
60 | size, \ | ||
61 | value) | ||
62 | |||
63 | #define early_cfg_write(where, size, value) \ | ||
64 | _tile_cfg_write(controller, \ | ||
65 | current_bus, \ | ||
66 | pci_slot, \ | ||
67 | pci_fn, \ | ||
68 | where, \ | ||
69 | size, \ | ||
70 | value) | ||
71 | |||
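These wrappers assume local variables named controller, current_bus, pci_slot and pci_fn at the point of use. A sketch of an early config-space read, fetching the 32-bit vendor/device ID dword before the PCI core is available (the surrounding context variables are exactly those the macros assume):

    u32 id;
    int ret = _tile_cfg_read(controller, current_bus, pci_slot, pci_fn,
                             PCI_VENDOR_ID, PCICFG_DWORD, &id);
    /* or, via the helper: ret = early_cfg_read(PCI_VENDOR_ID, PCICFG_DWORD, &id); */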
72 | |||
73 | |||
74 | #define PCICFG_BYTE 1 | ||
75 | #define PCICFG_WORD 2 | ||
76 | #define PCICFG_DWORD 4 | ||
77 | |||
78 | #define TILE_NUM_PCIE 2 | ||
79 | |||
80 | #define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index) | ||
81 | |||
82 | /* | ||
83 | * This decides whether to display the domain number in /proc. | ||
84 | */ | ||
85 | static inline int pci_proc_domain(struct pci_bus *bus) | ||
86 | { | ||
87 | return 1; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * I/O space is currently not supported. | ||
92 | */ | ||
93 | |||
94 | #define TILE_PCIE_LOWER_IO 0x0 | ||
95 | #define TILE_PCIE_UPPER_IO 0x10000 | ||
96 | #define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF | ||
97 | |||
98 | #define _PAGE_NO_CACHE 0 | ||
99 | #define _PAGE_GUARDED 0 | ||
100 | |||
101 | |||
102 | #define pcibios_assign_all_busses() pci_assign_all_buses | ||
103 | extern int pci_assign_all_buses; | ||
104 | |||
105 | static inline void pcibios_set_master(struct pci_dev *dev) | ||
106 | { | ||
107 | /* No special bus mastering setup handling */ | ||
108 | } | ||
109 | |||
110 | #define PCIBIOS_MIN_MEM 0 | ||
111 | #define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO | ||
112 | |||
113 | /* | ||
114 | * This flag indicates whether the platform is a TILEmpower board, which | ||
115 | * needs special configuration for the PLX switch chip. | ||
116 | */ | ||
117 | extern int blade_pci; | ||
118 | |||
119 | /* implement the pci_ DMA API in terms of the generic device dma_ one */ | ||
120 | #include <asm-generic/pci-dma-compat.h> | ||
121 | |||
122 | /* generic pci stuff */ | ||
123 | #include <asm-generic/pci.h> | ||
124 | |||
125 | /* Use any cpu for PCI. */ | ||
126 | #define cpumask_of_pcibus(bus) cpu_online_mask | ||
127 | |||
128 | #endif /* _ASM_TILE_PCI_H */ | ||
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h new file mode 100644 index 000000000000..63294f5a8efb --- /dev/null +++ b/arch/tile/include/asm/percpu.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PERCPU_H | ||
16 | #define _ASM_TILE_PERCPU_H | ||
17 | |||
18 | register unsigned long __my_cpu_offset __asm__("tp"); | ||
19 | #define __my_cpu_offset __my_cpu_offset | ||
20 | #define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) | ||
21 | |||
22 | #include <asm-generic/percpu.h> | ||
23 | |||
24 | #endif /* _ASM_TILE_PERCPU_H */ | ||
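Pointing __my_cpu_offset at the dedicated thread-pointer register means the generic per-cpu accessors need no memory load to find the current CPU's data area: every access is simply "tp plus the variable's offset". Conceptually (this is a sketch of what the asm-generic layer boils down to, not its literal code):

    /*
     *   DEFINE_PER_CPU(int, foo);
     *
     *   this_cpu_read(foo)   ~   *(int *)((char *)&foo + __my_cpu_offset)
     *
     * where __my_cpu_offset is just the "tp" register, written once per
     * CPU at boot via set_my_cpu_offset().
     */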
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h new file mode 100644 index 000000000000..cf52791a5501 --- /dev/null +++ b/arch/tile/include/asm/pgalloc.h | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PGALLOC_H | ||
16 | #define _ASM_TILE_PGALLOC_H | ||
17 | |||
18 | #include <linux/threads.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/mmzone.h> | ||
21 | #include <asm/fixmap.h> | ||
22 | #include <hv/hypervisor.h> | ||
23 | |||
24 | /* The log2 size, in bytes, of a second-level page table. */ | ||
25 | #define L2_KERNEL_PGTABLE_SHIFT \ | ||
26 | (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE) | ||
27 | |||
28 | /* We currently allocate user L2 page tables by page (unlike kernel L2s). */ | ||
29 | #if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL | ||
30 | #define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL | ||
31 | #else | ||
32 | #define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT | ||
33 | #endif | ||
34 | |||
35 | /* How many pages do we need, as an "order", for a user L2 page table? */ | ||
36 | #define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL) | ||
37 | |||
38 | /* How big is a kernel L2 page table? */ | ||
39 | #define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT) | ||
40 | |||
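With the current hypervisor page sizes (64 KB small, 16 MB huge) and 8-byte HV_PTE entries, the shifts above work out as follows; the result matches the "2KB vs 64KB" note in <asm/page.h>:

    /* Assuming HV_LOG2_PAGE_SIZE_SMALL == 16, HV_LOG2_PAGE_SIZE_LARGE == 24
     * and HV_LOG2_PTE_SIZE == 3 (8-byte PTEs):
     *
     *   L2_KERNEL_PGTABLE_SHIFT == 24 - 16 + 3 == 11   ->  2 KB (256 entries)
     *   L2_USER_PGTABLE_SHIFT   == 16 (rounded up to a small page)
     *   L2_USER_PGTABLE_ORDER   == 16 - 16 == 0        ->  one 64 KB page
     */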
41 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) | ||
42 | { | ||
43 | #ifdef CONFIG_64BIT | ||
44 | set_pte_order(pmdp, pmd, L2_USER_PGTABLE_ORDER); | ||
45 | #else | ||
46 | set_pte_order(&pmdp->pud.pgd, pmd.pud.pgd, L2_USER_PGTABLE_ORDER); | ||
47 | #endif | ||
48 | } | ||
49 | |||
50 | static inline void pmd_populate_kernel(struct mm_struct *mm, | ||
51 | pmd_t *pmd, pte_t *ptep) | ||
52 | { | ||
53 | set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN, | ||
54 | __pgprot(_PAGE_PRESENT))); | ||
55 | } | ||
56 | |||
57 | static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | ||
58 | pgtable_t page) | ||
59 | { | ||
60 | set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)), | ||
61 | __pgprot(_PAGE_PRESENT))); | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * Allocate and free page tables. | ||
66 | */ | ||
67 | |||
68 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | ||
69 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | ||
70 | |||
71 | extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); | ||
72 | extern void pte_free(struct mm_struct *mm, struct page *pte); | ||
73 | |||
74 | #define pmd_pgtable(pmd) pmd_page(pmd) | ||
75 | |||
76 | static inline pte_t * | ||
77 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | ||
78 | { | ||
79 | return pfn_to_kaddr(page_to_pfn(pte_alloc_one(mm, address))); | ||
80 | } | ||
81 | |||
82 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | ||
83 | { | ||
84 | BUG_ON((unsigned long)pte & (PAGE_SIZE-1)); | ||
85 | pte_free(mm, virt_to_page(pte)); | ||
86 | } | ||
87 | |||
88 | extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, | ||
89 | unsigned long address); | ||
90 | |||
91 | #define check_pgt_cache() do { } while (0) | ||
92 | |||
93 | /* | ||
94 | * Get the small-page pte_t lowmem entry for a given pfn. | ||
95 | * This may or may not be in use, depending on whether the initial | ||
96 | * huge-page entry for the page has already been shattered. | ||
97 | */ | ||
98 | pte_t *get_prealloc_pte(unsigned long pfn); | ||
99 | |||
100 | /* During init, we can shatter kernel huge pages if needed. */ | ||
101 | void shatter_pmd(pmd_t *pmd); | ||
102 | |||
103 | #ifdef __tilegx__ | ||
104 | /* We share a single page allocator for both L1 and L2 page tables. */ | ||
105 | #if HV_L1_SIZE != HV_L2_SIZE | ||
106 | # error Rework assumption that L1 and L2 page tables are same size. | ||
107 | #endif | ||
108 | #define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER | ||
109 | #define pud_populate(mm, pud, pmd) \ | ||
110 | pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd)) | ||
111 | #define pmd_alloc_one(mm, addr) \ | ||
112 | ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr)))) | ||
113 | #define pmd_free(mm, pmdp) \ | ||
114 | pte_free((mm), virt_to_page(pmdp)) | ||
115 | #define __pmd_free_tlb(tlb, pmdp, address) \ | ||
116 | __pte_free_tlb((tlb), virt_to_page(pmdp), (address)) | ||
117 | #endif | ||
118 | |||
119 | #endif /* _ASM_TILE_PGALLOC_H */ | ||
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h new file mode 100644 index 000000000000..beb1504e9c10 --- /dev/null +++ b/arch/tile/include/asm/pgtable.h | |||
@@ -0,0 +1,475 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * This file contains the functions and defines necessary to modify and use | ||
15 | * the TILE page table tree. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_TILE_PGTABLE_H | ||
19 | #define _ASM_TILE_PGTABLE_H | ||
20 | |||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | #ifndef __ASSEMBLY__ | ||
24 | |||
25 | #include <linux/bitops.h> | ||
26 | #include <linux/threads.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/list.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <asm/processor.h> | ||
31 | #include <asm/fixmap.h> | ||
32 | #include <asm/system.h> | ||
33 | |||
34 | struct mm_struct; | ||
35 | struct vm_area_struct; | ||
36 | |||
37 | /* | ||
38 | * ZERO_PAGE is a global shared page that is always zero: used | ||
39 | * for zero-mapped memory areas etc. | ||

40 | */ | ||
41 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | ||
42 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) | ||
43 | |||
44 | extern pgd_t swapper_pg_dir[]; | ||
45 | extern pgprot_t swapper_pgprot; | ||
46 | extern struct kmem_cache *pgd_cache; | ||
47 | extern spinlock_t pgd_lock; | ||
48 | extern struct list_head pgd_list; | ||
49 | |||
50 | /* | ||
51 | * The very last slots in the pgd_t are for addresses unusable by Linux | ||
52 | * (pgd_addr_invalid() returns true). So we use them for the list structure. | ||
53 | * The x86 code we are modelled on uses the page->private/index fields | ||
54 | * (older 2.6 kernels) or the lru list (newer 2.6 kernels), but since | ||
55 | * our pgds are so much smaller than a page, it seems a waste to | ||
56 | * spend a whole page on each pgd. | ||
57 | */ | ||
58 | #define PGD_LIST_OFFSET \ | ||
59 | ((PTRS_PER_PGD * sizeof(pgd_t)) - sizeof(struct list_head)) | ||
60 | #define pgd_to_list(pgd) \ | ||
61 | ((struct list_head *)((char *)(pgd) + PGD_LIST_OFFSET)) | ||
62 | #define list_to_pgd(list) \ | ||
63 | ((pgd_t *)((char *)(list) - PGD_LIST_OFFSET)) | ||
64 | |||
65 | extern void pgtable_cache_init(void); | ||
66 | extern void paging_init(void); | ||
67 | extern void set_page_homes(void); | ||
68 | |||
69 | #define FIRST_USER_ADDRESS 0 | ||
70 | |||
71 | #define _PAGE_PRESENT HV_PTE_PRESENT | ||
72 | #define _PAGE_HUGE_PAGE HV_PTE_PAGE | ||
73 | #define _PAGE_READABLE HV_PTE_READABLE | ||
74 | #define _PAGE_WRITABLE HV_PTE_WRITABLE | ||
75 | #define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE | ||
76 | #define _PAGE_ACCESSED HV_PTE_ACCESSED | ||
77 | #define _PAGE_DIRTY HV_PTE_DIRTY | ||
78 | #define _PAGE_GLOBAL HV_PTE_GLOBAL | ||
79 | #define _PAGE_USER HV_PTE_USER | ||
80 | |||
81 | /* | ||
82 | * All the "standard" bits. Cache-control bits are managed elsewhere. | ||
83 | * This is used to test for valid level-2 page table pointers by checking | ||
84 | * all the bits, and to mask away the cache control bits for mprotect. | ||
85 | */ | ||
86 | #define _PAGE_ALL (\ | ||
87 | _PAGE_PRESENT | \ | ||
88 | _PAGE_HUGE_PAGE | \ | ||
89 | _PAGE_READABLE | \ | ||
90 | _PAGE_WRITABLE | \ | ||
91 | _PAGE_EXECUTABLE | \ | ||
92 | _PAGE_ACCESSED | \ | ||
93 | _PAGE_DIRTY | \ | ||
94 | _PAGE_GLOBAL | \ | ||
95 | _PAGE_USER \ | ||
96 | ) | ||
97 | |||
98 | #define PAGE_NONE \ | ||
99 | __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) | ||
100 | #define PAGE_SHARED \ | ||
101 | __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \ | ||
102 | _PAGE_USER | _PAGE_ACCESSED) | ||
103 | |||
104 | #define PAGE_SHARED_EXEC \ | ||
105 | __pgprot(_PAGE_PRESENT | _PAGE_READABLE | _PAGE_WRITABLE | \ | ||
106 | _PAGE_EXECUTABLE | _PAGE_USER | _PAGE_ACCESSED) | ||
107 | #define PAGE_COPY_NOEXEC \ | ||
108 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE) | ||
109 | #define PAGE_COPY_EXEC \ | ||
110 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \ | ||
111 | _PAGE_READABLE | _PAGE_EXECUTABLE) | ||
112 | #define PAGE_COPY \ | ||
113 | PAGE_COPY_NOEXEC | ||
114 | #define PAGE_READONLY \ | ||
115 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_READABLE) | ||
116 | #define PAGE_READONLY_EXEC \ | ||
117 | __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | \ | ||
118 | _PAGE_READABLE | _PAGE_EXECUTABLE) | ||
119 | |||
120 | #define _PAGE_KERNEL_RO \ | ||
121 | (_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READABLE | _PAGE_ACCESSED) | ||
122 | #define _PAGE_KERNEL \ | ||
123 | (_PAGE_KERNEL_RO | _PAGE_WRITABLE | _PAGE_DIRTY) | ||
124 | #define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXECUTABLE) | ||
125 | |||
126 | #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) | ||
127 | #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO) | ||
128 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) | ||
129 | |||
130 | #define page_to_kpgprot(p) PAGE_KERNEL | ||
131 | |||
132 | /* | ||
133 | * We could tighten these up, but for now writable or executable | ||
134 | * implies readable. | ||
135 | */ | ||
136 | #define __P000 PAGE_NONE | ||
137 | #define __P001 PAGE_READONLY | ||
138 | #define __P010 PAGE_COPY /* this is write-only, which we won't support */ | ||
139 | #define __P011 PAGE_COPY | ||
140 | #define __P100 PAGE_READONLY_EXEC | ||
141 | #define __P101 PAGE_READONLY_EXEC | ||
142 | #define __P110 PAGE_COPY_EXEC | ||
143 | #define __P111 PAGE_COPY_EXEC | ||
144 | |||
145 | #define __S000 PAGE_NONE | ||
146 | #define __S001 PAGE_READONLY | ||
147 | #define __S010 PAGE_SHARED | ||
148 | #define __S011 PAGE_SHARED | ||
149 | #define __S100 PAGE_READONLY_EXEC | ||
150 | #define __S101 PAGE_READONLY_EXEC | ||
151 | #define __S110 PAGE_SHARED_EXEC | ||
152 | #define __S111 PAGE_SHARED_EXEC | ||
153 | |||
154 | /* | ||
155 | * All the normal _PAGE_ALL bits are ignored for PMDs, except _PAGE_PRESENT | ||
156 | * and _PAGE_HUGE_PAGE, which must be one and zero, respectively. | ||
157 | * We set the ignored bits to zero. | ||
158 | */ | ||
159 | #define _PAGE_TABLE _PAGE_PRESENT | ||
160 | |||
161 | /* Inherit the caching flags from the old protection bits. */ | ||
162 | #define pgprot_modify(oldprot, newprot) \ | ||
163 | (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val } | ||
164 | |||
165 | /* Just setting the PFN to zero suffices. */ | ||
166 | #define pte_pgprot(x) hv_pte_set_pfn((x), 0) | ||
167 | |||
168 | /* | ||
169 | * For PTEs and PDEs, we must clear the Present bit first when | ||
170 | * clearing a page table entry, so clear the bottom half first and | ||
171 | * enforce ordering with a barrier. | ||
172 | */ | ||
173 | static inline void __pte_clear(pte_t *ptep) | ||
174 | { | ||
175 | #ifdef __tilegx__ | ||
176 | ptep->val = 0; | ||
177 | #else | ||
178 | u32 *tmp = (u32 *)ptep; | ||
179 | tmp[0] = 0; | ||
180 | barrier(); | ||
181 | tmp[1] = 0; | ||
182 | #endif | ||
183 | } | ||
184 | #define pte_clear(mm, addr, ptep) __pte_clear(ptep) | ||
185 | |||
186 | /* | ||
187 | * The following only work if pte_present() is true. | ||
188 | * Undefined behaviour if not. | ||
189 | */ | ||
190 | #define pte_present hv_pte_get_present | ||
191 | #define pte_user hv_pte_get_user | ||
192 | #define pte_read hv_pte_get_readable | ||
193 | #define pte_dirty hv_pte_get_dirty | ||
194 | #define pte_young hv_pte_get_accessed | ||
195 | #define pte_write hv_pte_get_writable | ||
196 | #define pte_exec hv_pte_get_executable | ||
197 | #define pte_huge hv_pte_get_page | ||
198 | #define pte_rdprotect hv_pte_clear_readable | ||
199 | #define pte_exprotect hv_pte_clear_executable | ||
200 | #define pte_mkclean hv_pte_clear_dirty | ||
201 | #define pte_mkold hv_pte_clear_accessed | ||
202 | #define pte_wrprotect hv_pte_clear_writable | ||
203 | #define pte_mksmall hv_pte_clear_page | ||
204 | #define pte_mkread hv_pte_set_readable | ||
205 | #define pte_mkexec hv_pte_set_executable | ||
206 | #define pte_mkdirty hv_pte_set_dirty | ||
207 | #define pte_mkyoung hv_pte_set_accessed | ||
208 | #define pte_mkwrite hv_pte_set_writable | ||
209 | #define pte_mkhuge hv_pte_set_page | ||
210 | |||
211 | #define pte_special(pte) 0 | ||
212 | #define pte_mkspecial(pte) (pte) | ||
213 | |||
214 | /* | ||
215 | * Use some spare bits in the PTE for user-caching tags. | ||
216 | */ | ||
217 | #define pte_set_forcecache hv_pte_set_client0 | ||
218 | #define pte_get_forcecache hv_pte_get_client0 | ||
219 | #define pte_clear_forcecache hv_pte_clear_client0 | ||
220 | #define pte_set_anyhome hv_pte_set_client1 | ||
221 | #define pte_get_anyhome hv_pte_get_client1 | ||
222 | #define pte_clear_anyhome hv_pte_clear_client1 | ||
223 | |||
224 | /* | ||
225 | * A migrating PTE has _PAGE_PRESENT clear but all the other bits preserved. | ||
226 | */ | ||
227 | #define pte_migrating hv_pte_get_migrating | ||
228 | #define pte_mkmigrate(x) hv_pte_set_migrating(hv_pte_clear_present(x)) | ||
229 | #define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x)) | ||
230 | |||
231 | #define pte_ERROR(e) \ | ||
232 | printk("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e)) | ||
233 | #define pgd_ERROR(e) \ | ||
234 | printk("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
235 | |||
236 | /* | ||
237 | * set_pte_order() sets the given PTE and also sanity-checks the | ||
238 | * requested PTE against the page homecaching. Unspecified parts | ||
239 | * of the PTE are filled in when it is written to memory, i.e. all | ||
240 | * caching attributes if "!forcecache", or the home cpu if "anyhome". | ||
241 | */ | ||
242 | extern void set_pte_order(pte_t *ptep, pte_t pte, int order); | ||
243 | |||
244 | #define set_pte(ptep, pteval) set_pte_order(ptep, pteval, 0) | ||
245 | #define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval) | ||
246 | #define set_pte_atomic(pteptr, pteval) set_pte(pteptr, pteval) | ||
247 | |||
248 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | ||
249 | |||
250 | static inline int pte_none(pte_t pte) | ||
251 | { | ||
252 | return !pte.val; | ||
253 | } | ||
254 | |||
255 | static inline unsigned long pte_pfn(pte_t pte) | ||
256 | { | ||
257 | return hv_pte_get_pfn(pte); | ||
258 | } | ||
259 | |||
260 | /* Set or get the remote cache cpu in a pgprot with remote caching. */ | ||
261 | extern pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu); | ||
262 | extern int get_remote_cache_cpu(pgprot_t prot); | ||
263 | |||
264 | static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) | ||
265 | { | ||
266 | return hv_pte_set_pfn(prot, pfn); | ||
267 | } | ||
268 | |||
269 | /* Support for priority mappings. */ | ||
270 | extern void start_mm_caching(struct mm_struct *mm); | ||
271 | extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next); | ||
272 | |||
273 | /* | ||
274 | * Support non-linear file mappings (see sys_remap_file_pages). | ||
275 | * This is defined by CLIENT1 set but CLIENT0 and _PAGE_PRESENT clear, and the | ||
276 | * file offset in the 32 high bits. | ||
277 | */ | ||
278 | #define _PAGE_FILE HV_PTE_CLIENT1 | ||
279 | #define PTE_FILE_MAX_BITS 32 | ||
280 | #define pte_file(pte) (hv_pte_get_client1(pte) && !hv_pte_get_client0(pte)) | ||
281 | #define pte_to_pgoff(pte) ((pte).val >> 32) | ||
282 | #define pgoff_to_pte(off) ((pte_t) { (((long long)(off)) << 32) | _PAGE_FILE }) | ||
283 | |||
284 | /* | ||
286 | * Encode and decode a swap entry (see <linux/swapops.h>). | ||
286 | * We put the swap file type+offset in the 32 high bits; | ||
287 | * I believe we can just leave the low bits clear. | ||
288 | */ | ||
289 | #define __swp_type(swp) ((swp).val & 0x1f) | ||
290 | #define __swp_offset(swp) ((swp).val >> 5) | ||
291 | #define __swp_entry(type, off) ((swp_entry_t) { (type) | ((off) << 5) }) | ||
292 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).val >> 32 }) | ||
293 | #define __swp_entry_to_pte(swp) ((pte_t) { (((long long) ((swp).val)) << 32) }) | ||
294 | |||
295 | /* | ||
296 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | ||
297 | * | ||
298 | * dst - pointer to pgd range anywhere on a pgd page | ||
299 | * src - "" | ||
300 | * count - the number of pgds to copy. | ||
301 | * | ||
302 | * dst and src can be on the same page, but the range must not overlap, | ||
303 | * and must not cross a page boundary. | ||
304 | */ | ||
305 | static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | ||
306 | { | ||
307 | memcpy(dst, src, count * sizeof(pgd_t)); | ||
308 | } | ||
309 | |||
310 | /* | ||
311 | * Conversion functions: convert a page and protection to a page entry, | ||
312 | * and a page entry and page directory to the page they refer to. | ||
313 | */ | ||
314 | |||
315 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
316 | |||
317 | /* | ||
318 | * If we are doing an mprotect(), just accept the new vma->vm_page_prot | ||
319 | * value and combine it with the PFN from the old PTE to get a new PTE. | ||
320 | */ | ||
321 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | ||
322 | { | ||
323 | return pfn_pte(hv_pte_get_pfn(pte), newprot); | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD] | ||
328 | * | ||
329 | * This macro returns the index of the entry in the pgd page which would | ||
330 | * control the given virtual address. | ||
331 | */ | ||
332 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) | ||
333 | |||
334 | /* | ||
335 | * pgd_offset() returns a (pgd_t *) | ||
336 | * pgd_index() is used to get the offset into the pgd page's array of pgd_t's. | ||
337 | */ | ||
338 | #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) | ||
339 | |||
340 | /* | ||
341 | * A shortcut which implies the use of the kernel's pgd, instead | ||
342 | * of a process's. | ||
343 | */ | ||
344 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | ||
345 | |||
346 | #if defined(CONFIG_HIGHPTE) | ||
347 | extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type); | ||
348 | #define pte_offset_map(dir, address) \ | ||
349 | _pte_offset_map(dir, address, KM_PTE0) | ||
350 | #define pte_offset_map_nested(dir, address) \ | ||
351 | _pte_offset_map(dir, address, KM_PTE1) | ||
352 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | ||
353 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | ||
354 | #else | ||
355 | #define pte_offset_map(dir, address) pte_offset_kernel(dir, address) | ||
356 | #define pte_offset_map_nested(dir, address) pte_offset_map(dir, address) | ||
357 | #define pte_unmap(pte) do { } while (0) | ||
358 | #define pte_unmap_nested(pte) do { } while (0) | ||
359 | #endif | ||
360 | |||
361 | /* Clear a non-executable kernel PTE and flush it from the TLB. */ | ||
362 | #define kpte_clear_flush(ptep, vaddr) \ | ||
363 | do { \ | ||
364 | pte_clear(&init_mm, (vaddr), (ptep)); \ | ||
365 | local_flush_tlb_page(FLUSH_NONEXEC, (vaddr), PAGE_SIZE); \ | ||
366 | } while (0) | ||
367 | |||
368 | /* | ||
369 | * The kernel page tables contain what we need, and we flush when we | ||
370 | * change specific page table entries. | ||
371 | */ | ||
372 | #define update_mmu_cache(vma, address, pte) do { } while (0) | ||
373 | |||
374 | #ifdef CONFIG_FLATMEM | ||
375 | #define kern_addr_valid(addr) (1) | ||
376 | #endif /* CONFIG_FLATMEM */ | ||
377 | |||
378 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
379 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
380 | |||
381 | extern void vmalloc_sync_all(void); | ||
382 | |||
383 | #endif /* !__ASSEMBLY__ */ | ||
384 | |||
385 | #ifdef __tilegx__ | ||
386 | #include <asm/pgtable_64.h> | ||
387 | #else | ||
388 | #include <asm/pgtable_32.h> | ||
389 | #endif | ||
390 | |||
391 | #ifndef __ASSEMBLY__ | ||
392 | |||
393 | static inline int pmd_none(pmd_t pmd) | ||
394 | { | ||
395 | /* | ||
396 | * Only check the low word on 32-bit platforms, since it might be | ||
397 | * out of sync with the upper half. | ||
398 | */ | ||
399 | return (unsigned long)pmd_val(pmd) == 0; | ||
400 | } | ||
401 | |||
402 | static inline int pmd_present(pmd_t pmd) | ||
403 | { | ||
404 | return pmd_val(pmd) & _PAGE_PRESENT; | ||
405 | } | ||
406 | |||
407 | static inline int pmd_bad(pmd_t pmd) | ||
408 | { | ||
409 | return ((pmd_val(pmd) & _PAGE_ALL) != _PAGE_TABLE); | ||
410 | } | ||
411 | |||
412 | static inline unsigned long pages_to_mb(unsigned long npg) | ||
413 | { | ||
414 | return npg >> (20 - PAGE_SHIFT); | ||
415 | } | ||
416 | |||
417 | /* | ||
418 | * The pmd can be thought of as an array like this: pmd_t[PTRS_PER_PMD] | ||
419 | * | ||
420 | * This function returns the index of the entry in the pmd which would | ||
421 | * control the given virtual address. | ||
422 | */ | ||
423 | static inline unsigned long pmd_index(unsigned long address) | ||
424 | { | ||
425 | return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); | ||
426 | } | ||
427 | |||
428 | /* | ||
429 | * A given kernel pmd_t maps to a specific virtual address (either a | ||
430 | * kernel huge page or a kernel pte_t table). Since kernel pte_t | ||
431 | * tables can be aligned at sub-page granularity, this function can | ||
432 | * return non-page-aligned pointers, despite its name. | ||
433 | */ | ||
434 | static inline unsigned long pmd_page_vaddr(pmd_t pmd) | ||
435 | { | ||
436 | phys_addr_t pa = | ||
437 | (phys_addr_t)pmd_ptfn(pmd) << HV_LOG2_PAGE_TABLE_ALIGN; | ||
438 | return (unsigned long)__va(pa); | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * A pmd_t points to the base of a huge page or to a pte_t array. | ||
443 | * If a pte_t array, since we can have multiple per page, we don't | ||
444 | * have a one-to-one mapping of pmd_t's to pages. However, this is | ||
445 | * OK for pte_lockptr(), since we just end up with potentially one | ||
446 | * lock being used for several pte_t arrays. | ||
447 | */ | ||
448 | #define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd))) | ||
449 | |||
450 | /* | ||
451 | * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE] | ||
452 | * | ||
453 | * This macro returns the index of the entry in the pte page which would | ||
454 | * control the given virtual address. | ||
455 | */ | ||
456 | static inline unsigned long pte_index(unsigned long address) | ||
457 | { | ||
458 | return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); | ||
459 | } | ||
460 | |||
461 | static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) | ||
462 | { | ||
463 | return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); | ||
464 | } | ||
465 | |||
466 | static inline int pmd_huge_page(pmd_t pmd) | ||
467 | { | ||
468 | return pmd_val(pmd) & _PAGE_HUGE_PAGE; | ||
469 | } | ||
470 | |||
471 | #include <asm-generic/pgtable.h> | ||
472 | |||
473 | #endif /* !__ASSEMBLY__ */ | ||
474 | |||
475 | #endif /* _ASM_TILE_PGTABLE_H */ | ||
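The swap-entry macros above pack the swap type into the low 5 bits and the offset into the remaining bits of a value kept in the high 32 bits of the PTE. A minimal standalone sketch of that encode/decode round trip, using plain integers instead of the kernel's pte_t and swp_entry_t types:

/* Sketch only, not part of the commit; mirrors the macro arithmetic above. */
#include <assert.h>
#include <stdint.h>

static uint32_t swp_entry(uint32_t type, uint32_t off) { return type | (off << 5); }
static uint32_t swp_type(uint32_t swp)   { return swp & 0x1f; }
static uint32_t swp_offset(uint32_t swp) { return swp >> 5; }

/* The entry lives in the high 32 bits of the 64-bit PTE value. */
static uint64_t swp_to_pteval(uint32_t swp)    { return (uint64_t)swp << 32; }
static uint32_t pteval_to_swp(uint64_t pteval) { return (uint32_t)(pteval >> 32); }

int main(void)
{
        uint32_t e = swp_entry(3, 0x1234);        /* swap area 3, page offset 0x1234 */
        uint64_t pteval = swp_to_pteval(e);       /* low bits stay clear, so !pte_present() */
        assert(swp_type(pteval_to_swp(pteval)) == 3);
        assert(swp_offset(pteval_to_swp(pteval)) == 0x1234);
        return 0;
}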
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h new file mode 100644 index 000000000000..b935fb2ad4f3 --- /dev/null +++ b/arch/tile/include/asm/pgtable_32.h | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_PGTABLE_32_H | ||
17 | #define _ASM_TILE_PGTABLE_32_H | ||
18 | |||
19 | /* | ||
20 | * The level-1 index is defined by the huge page size. A PGD is composed | ||
21 | * of PTRS_PER_PGD pgd_t's and is the top level of the page table. | ||
22 | */ | ||
23 | #define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE | ||
24 | #define PGDIR_SIZE HV_PAGE_SIZE_LARGE | ||
25 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
26 | #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) | ||
27 | |||
28 | /* | ||
29 | * The level-2 index is defined by the difference between the huge | ||
30 | * page size and the normal page size. A PTE is composed of | ||
31 | * PTRS_PER_PTE pte_t's and is the bottom level of the page table. | ||
32 | * Note that the hypervisor docs use PTE for what we call pte_t, so | ||
33 | * this nomenclature is somewhat confusing. | ||
34 | */ | ||
35 | #define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) | ||
36 | |||
37 | #ifndef __ASSEMBLY__ | ||
38 | |||
39 | /* | ||
40 | * Right now we initialize only a single pte table. It can be extended | ||
41 | * easily; subsequent pte tables have to be allocated in one physical | ||
42 | * chunk of RAM. | ||
43 | * | ||
44 | * HOWEVER, if we are using an allocation scheme with slop after the | ||
45 | * end of the page table (e.g. where our L2 page tables are 2KB but | ||
46 | * our pages are 64KB and we are allocating via the page allocator) | ||
47 | * we can't extend it easily. | ||
48 | */ | ||
49 | #define LAST_PKMAP PTRS_PER_PTE | ||
50 | |||
51 | #define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*LAST_PKMAP) & PGDIR_MASK) | ||
52 | |||
53 | #ifdef CONFIG_HIGHMEM | ||
54 | # define __VMAPPING_END (PKMAP_BASE & ~(HPAGE_SIZE-1)) | ||
55 | #else | ||
56 | # define __VMAPPING_END (FIXADDR_START & ~(HPAGE_SIZE-1)) | ||
57 | #endif | ||
58 | |||
59 | #ifdef CONFIG_HUGEVMAP | ||
60 | #define HUGE_VMAP_END __VMAPPING_END | ||
61 | #define HUGE_VMAP_BASE (HUGE_VMAP_END - CONFIG_NR_HUGE_VMAPS * HPAGE_SIZE) | ||
62 | #define _VMALLOC_END HUGE_VMAP_BASE | ||
63 | #else | ||
64 | #define _VMALLOC_END __VMAPPING_END | ||
65 | #endif | ||
66 | |||
67 | /* | ||
68 | * Align the vmalloc area to an L2 page table, and leave a guard page | ||
69 | * at the beginning and end. The vmalloc code also puts in an internal | ||
70 | * guard page between each allocation. | ||
71 | */ | ||
72 | #define VMALLOC_END (_VMALLOC_END - PAGE_SIZE) | ||
73 | extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */; | ||
74 | #define _VMALLOC_START (_VMALLOC_END - VMALLOC_RESERVE) | ||
75 | #define VMALLOC_START (_VMALLOC_START + PAGE_SIZE) | ||
76 | |||
77 | /* This is the maximum possible amount of lowmem. */ | ||
78 | #define MAXMEM (_VMALLOC_START - PAGE_OFFSET) | ||
79 | |||
80 | /* We have no pmd or pud since we are strictly a two-level page table */ | ||
81 | #include <asm-generic/pgtable-nopmd.h> | ||
82 | |||
83 | /* We don't define any pgds for these addresses. */ | ||
84 | static inline int pgd_addr_invalid(unsigned long addr) | ||
85 | { | ||
86 | return addr >= MEM_HV_INTRPT; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Provide versions of these routines that can be used safely when | ||
91 | * the hypervisor may be asynchronously modifying dirty/accessed bits. | ||
92 | */ | ||
93 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
94 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
95 | |||
96 | extern int ptep_test_and_clear_young(struct vm_area_struct *, | ||
97 | unsigned long addr, pte_t *); | ||
98 | extern void ptep_set_wrprotect(struct mm_struct *, | ||
99 | unsigned long addr, pte_t *); | ||
100 | |||
101 | /* Create a pmd from a PTFN. */ | ||
102 | static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot) | ||
103 | { | ||
104 | return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } }; | ||
105 | } | ||
106 | |||
107 | /* Return the page-table frame number (ptfn) that a pmd_t points at. */ | ||
108 | #define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd) | ||
109 | |||
110 | static inline void pmd_clear(pmd_t *pmdp) | ||
111 | { | ||
112 | __pte_clear(&pmdp->pud.pgd); | ||
113 | } | ||
114 | |||
115 | #endif /* __ASSEMBLY__ */ | ||
116 | |||
117 | #endif /* _ASM_TILE_PGTABLE_32_H */ | ||
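With the two-level layout above, a 32-bit virtual address splits into a pgd index (the top 32 - PGDIR_SHIFT bits), a pte index, and a page offset, which is what pgd_index() and pte_index() in pgtable.h compute. A standalone sketch of that split, using illustrative sizes consistent with the comment above (64 KB small pages and 2 KB L2 tables, hence 16 MB huge pages); the real values come from the hypervisor's HV_LOG2_PAGE_SIZE_{SMALL,LARGE}:

/* Sketch only, not part of the commit; page sizes are assumed for illustration. */
#include <assert.h>
#include <stdint.h>

#define LOG2_PAGE_SIZE_SMALL 16                 /* 64 KB, assumed */
#define LOG2_PAGE_SIZE_LARGE 24                 /* 16 MB, assumed */

#define PGDIR_SHIFT   LOG2_PAGE_SIZE_LARGE
#define PTRS_PER_PGD  (1u << (32 - PGDIR_SHIFT))
#define PTRS_PER_PTE  (1u << (LOG2_PAGE_SIZE_LARGE - LOG2_PAGE_SIZE_SMALL))

static unsigned pgd_index(uint32_t va) { return (va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); }
static unsigned pte_index(uint32_t va) { return (va >> LOG2_PAGE_SIZE_SMALL) & (PTRS_PER_PTE - 1); }

int main(void)
{
        uint32_t va = 0xC1230456;               /* arbitrary address */
        assert(PTRS_PER_PGD == 256 && PTRS_PER_PTE == 256);
        assert(pgd_index(va) == 0xC1);          /* top 8 bits pick the pgd slot */
        assert(pte_index(va) == 0x23);          /* next 8 bits pick the pte slot */
        /* the low 16 bits (0x0456) are the offset within the 64 KB page */
        return 0;
}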
diff --git a/arch/tile/include/asm/poll.h b/arch/tile/include/asm/poll.h new file mode 100644 index 000000000000..c98509d3149e --- /dev/null +++ b/arch/tile/include/asm/poll.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/poll.h> | |||
diff --git a/arch/tile/include/asm/posix_types.h b/arch/tile/include/asm/posix_types.h new file mode 100644 index 000000000000..22cae6230ceb --- /dev/null +++ b/arch/tile/include/asm/posix_types.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/posix_types.h> | |||
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h new file mode 100644 index 000000000000..96c50d2c4c2b --- /dev/null +++ b/arch/tile/include/asm/processor.h | |||
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PROCESSOR_H | ||
16 | #define _ASM_TILE_PROCESSOR_H | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | |||
20 | /* | ||
21 | * NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one | ||
22 | * normally would, due to #include dependencies. | ||
23 | */ | ||
24 | #include <asm/ptrace.h> | ||
25 | #include <asm/percpu.h> | ||
26 | |||
27 | #include <arch/chip.h> | ||
28 | #include <arch/spr_def.h> | ||
29 | |||
30 | struct task_struct; | ||
31 | struct thread_struct; | ||
32 | struct list_head; | ||
33 | |||
34 | typedef struct { | ||
35 | unsigned long seg; | ||
36 | } mm_segment_t; | ||
37 | |||
38 | /* | ||
39 | * Default implementation of macro that returns current | ||
40 | * instruction pointer ("program counter"). | ||
41 | */ | ||
42 | void *current_text_addr(void); | ||
43 | |||
44 | #if CHIP_HAS_TILE_DMA() | ||
45 | /* Capture the state of a suspended DMA. */ | ||
46 | struct tile_dma_state { | ||
47 | int enabled; | ||
48 | unsigned long src; | ||
49 | unsigned long dest; | ||
50 | unsigned long strides; | ||
51 | unsigned long chunk_size; | ||
52 | unsigned long src_chunk; | ||
53 | unsigned long dest_chunk; | ||
54 | unsigned long byte; | ||
55 | unsigned long status; | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * A mask of the DMA status register for selecting only the 'running' | ||
60 | * and 'done' bits. | ||
61 | */ | ||
62 | #define DMA_STATUS_MASK \ | ||
63 | (SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK) | ||
64 | #endif | ||
65 | |||
66 | /* | ||
67 | * Track asynchronous TLB events (faults and access violations) | ||
68 | * that occur while we are in kernel mode from DMA or the SN processor. | ||
69 | */ | ||
70 | struct async_tlb { | ||
71 | short fault_num; /* original fault number; 0 if none */ | ||
72 | char is_fault; /* was it a fault (vs an access violation) */ | ||
73 | char is_write; /* for fault: was it caused by a write? */ | ||
74 | unsigned long address; /* what address faulted? */ | ||
75 | }; | ||
76 | |||
77 | |||
78 | struct thread_struct { | ||
79 | /* kernel stack pointer */ | ||
80 | unsigned long ksp; | ||
81 | /* kernel PC */ | ||
82 | unsigned long pc; | ||
83 | /* starting user stack pointer (for page migration) */ | ||
84 | unsigned long usp0; | ||
85 | /* pid of process that created this one */ | ||
86 | pid_t creator_pid; | ||
87 | #if CHIP_HAS_TILE_DMA() | ||
88 | /* DMA info for suspended threads (byte == 0 means no DMA state) */ | ||
89 | struct tile_dma_state tile_dma_state; | ||
90 | #endif | ||
91 | /* User EX_CONTEXT registers */ | ||
92 | unsigned long ex_context[2]; | ||
93 | /* User SYSTEM_SAVE registers */ | ||
94 | unsigned long system_save[4]; | ||
95 | /* User interrupt mask */ | ||
96 | unsigned long long interrupt_mask; | ||
97 | /* User interrupt-control 0 state */ | ||
98 | unsigned long intctrl_0; | ||
99 | #if CHIP_HAS_PROC_STATUS_SPR() | ||
100 | /* Any other miscellaneous processor state bits */ | ||
101 | unsigned long proc_status; | ||
102 | #endif | ||
103 | #if CHIP_HAS_TILE_DMA() | ||
104 | /* Async DMA TLB fault information */ | ||
105 | struct async_tlb dma_async_tlb; | ||
106 | #endif | ||
107 | #if CHIP_HAS_SN_PROC() | ||
108 | /* Was the static network processor running when we were switched out? */ | ||
109 | int sn_proc_running; | ||
110 | /* Async SNI TLB fault information */ | ||
111 | struct async_tlb sn_async_tlb; | ||
112 | #endif | ||
113 | }; | ||
114 | |||
115 | #endif /* !__ASSEMBLY__ */ | ||
116 | |||
117 | /* | ||
118 | * Start with "sp" this many bytes below the top of the kernel stack. | ||
119 | * This preserves the invariant that a called function may write to *sp. | ||
120 | */ | ||
121 | #define STACK_TOP_DELTA 8 | ||
122 | |||
123 | /* | ||
124 | * When entering the kernel via a fault, start with the top of the | ||
125 | * pt_regs structure this many bytes below the top of the page. | ||
126 | * This aligns the pt_regs structure optimally for cache-line access. | ||
127 | */ | ||
128 | #ifdef __tilegx__ | ||
129 | #define KSTK_PTREGS_GAP 48 | ||
130 | #else | ||
131 | #define KSTK_PTREGS_GAP 56 | ||
132 | #endif | ||
133 | |||
134 | #ifndef __ASSEMBLY__ | ||
135 | |||
136 | #ifdef __tilegx__ | ||
137 | #define TASK_SIZE_MAX (MEM_LOW_END + 1) | ||
138 | #else | ||
139 | #define TASK_SIZE_MAX PAGE_OFFSET | ||
140 | #endif | ||
141 | |||
142 | /* TASK_SIZE and related variables are always checked in "current" context. */ | ||
143 | #ifdef CONFIG_COMPAT | ||
144 | #define COMPAT_TASK_SIZE (1UL << 31) | ||
145 | #define TASK_SIZE ((current_thread_info()->status & TS_COMPAT) ?\ | ||
146 | COMPAT_TASK_SIZE : TASK_SIZE_MAX) | ||
147 | #else | ||
148 | #define TASK_SIZE TASK_SIZE_MAX | ||
149 | #endif | ||
150 | |||
151 | /* We provide a minimal "vdso" a la x86; just the sigreturn code for now. */ | ||
152 | #define VDSO_BASE (TASK_SIZE - PAGE_SIZE) | ||
153 | |||
154 | #define STACK_TOP VDSO_BASE | ||
155 | |||
156 | /* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */ | ||
157 | #define STACK_TOP_MAX TASK_SIZE_MAX | ||
158 | |||
159 | /* | ||
160 | * This decides where the kernel will search for a free chunk of vm | ||
161 | * space during mmap's, if it is using bottom-up mapping. | ||
162 | */ | ||
163 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | ||
164 | |||
165 | #define HAVE_ARCH_PICK_MMAP_LAYOUT | ||
166 | |||
167 | #define INIT_THREAD { \ | ||
168 | .ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \ | ||
169 | .interrupt_mask = -1ULL \ | ||
170 | } | ||
171 | |||
172 | /* Kernel stack top for the task that first boots on this cpu. */ | ||
173 | DECLARE_PER_CPU(unsigned long, boot_sp); | ||
174 | |||
175 | /* PC to boot from on this cpu. */ | ||
176 | DECLARE_PER_CPU(unsigned long, boot_pc); | ||
177 | |||
178 | /* Do necessary setup to start up a newly executed thread. */ | ||
179 | static inline void start_thread(struct pt_regs *regs, | ||
180 | unsigned long pc, unsigned long usp) | ||
181 | { | ||
182 | regs->pc = pc; | ||
183 | regs->sp = usp; | ||
184 | } | ||
185 | |||
186 | /* Free all resources held by a thread. */ | ||
187 | static inline void release_thread(struct task_struct *dead_task) | ||
188 | { | ||
189 | /* Nothing for now */ | ||
190 | } | ||
191 | |||
192 | /* Prepare to copy thread state - unlazy all lazy status. */ | ||
193 | #define prepare_to_copy(tsk) do { } while (0) | ||
194 | |||
195 | extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); | ||
196 | |||
197 | /* Helper routines for setting home cache modes at exec() time. */ | ||
198 | |||
199 | |||
200 | /* | ||
201 | * Return saved (kernel) PC of a blocked thread. | ||
202 | * Only used in a printk() in kernel/sched.c, so don't work too hard. | ||
203 | */ | ||
204 | #define thread_saved_pc(t) ((t)->thread.pc) | ||
205 | |||
206 | unsigned long get_wchan(struct task_struct *p); | ||
207 | |||
208 | /* Return initial ksp value for given task. */ | ||
209 | #define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE) | ||
210 | |||
211 | /* Return some info about the user process TASK. */ | ||
212 | #define KSTK_TOP(task) (task_ksp0(task) - STACK_TOP_DELTA) | ||
213 | #define task_pt_regs(task) \ | ||
214 | ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1) | ||
215 | #define task_sp(task) (task_pt_regs(task)->sp) | ||
216 | #define task_pc(task) (task_pt_regs(task)->pc) | ||
217 | /* Aliases for pc and sp (used in fs/proc/array.c) */ | ||
218 | #define KSTK_EIP(task) task_pc(task) | ||
219 | #define KSTK_ESP(task) task_sp(task) | ||
220 | |||
221 | /* Standard format for printing registers and other word-size data. */ | ||
222 | #ifdef __tilegx__ | ||
223 | # define REGFMT "0x%016lx" | ||
224 | #else | ||
225 | # define REGFMT "0x%08lx" | ||
226 | #endif | ||
227 | |||
228 | /* | ||
229 | * Do some slow action (e.g. read a slow SPR). | ||
230 | * Note that this must also have compiler-barrier semantics since | ||
231 | * it may be used in a busy loop reading memory. | ||
232 | */ | ||
233 | static inline void cpu_relax(void) | ||
234 | { | ||
235 | __insn_mfspr(SPR_PASS); | ||
236 | barrier(); | ||
237 | } | ||
238 | |||
239 | struct siginfo; | ||
240 | extern void arch_coredump_signal(struct siginfo *, struct pt_regs *); | ||
241 | #define arch_coredump_signal arch_coredump_signal | ||
242 | |||
243 | /* Provide information about the chip model. */ | ||
244 | extern char chip_model[64]; | ||
245 | |||
246 | /* Data on which physical memory controller corresponds to which NUMA node. */ | ||
247 | extern int node_controller[]; | ||
248 | |||
249 | |||
250 | /* Do we dump information to the console when a user application crashes? */ | ||
251 | extern int show_crashinfo; | ||
252 | |||
253 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
254 | /* Does the heap allocator return hash-for-home pages by default? */ | ||
255 | extern int hash_default; | ||
256 | |||
257 | /* Should kernel stack pages be hash-for-home? */ | ||
258 | extern int kstack_hash; | ||
259 | #else | ||
260 | #define hash_default 0 | ||
261 | #define kstack_hash 0 | ||
262 | #endif | ||
263 | |||
264 | /* Are we using huge pages in the TLB for kernel data? */ | ||
265 | extern int kdata_huge; | ||
266 | |||
267 | /* | ||
268 | * Note that with OLOC the prefetch will return an unused read word to | ||
269 | * the issuing tile, which will cause some MDN traffic. Benchmarking | ||
269 | * should be done to see whether this cost outweighs the benefit of prefetching. | ||
271 | */ | ||
272 | #define ARCH_HAS_PREFETCH | ||
273 | #define ARCH_HAS_PREFETCHW | ||
274 | #define ARCH_HAS_SPINLOCK_PREFETCH | ||
275 | |||
276 | #define prefetch(ptr) __builtin_prefetch((ptr), 0, 3) | ||
277 | #define prefetchw(ptr) __builtin_prefetch((ptr), 1, 3) | ||
278 | |||
279 | #ifdef CONFIG_SMP | ||
280 | #define spin_lock_prefetch(ptr) prefetchw(ptr) | ||
281 | #else | ||
282 | /* Nothing to prefetch. */ | ||
283 | #define spin_lock_prefetch(lock) do { } while (0) | ||
284 | #endif | ||
285 | |||
286 | #else /* __ASSEMBLY__ */ | ||
287 | |||
288 | /* Do some slow action (e.g. read a slow SPR). */ | ||
289 | #define CPU_RELAX mfspr zero, SPR_PASS | ||
290 | |||
291 | #endif /* !__ASSEMBLY__ */ | ||
292 | |||
293 | /* Assembly code assumes that the PL is in the low bits. */ | ||
294 | #if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0 | ||
295 | # error Fix assembly assumptions about PL | ||
296 | #endif | ||
297 | |||
298 | /* We sometimes use these macros for EX_CONTEXT_0_1 as well. */ | ||
299 | #if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \ | ||
300 | SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \ | ||
301 | SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \ | ||
302 | SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK | ||
303 | # error Fix assumptions that EX1 macros work for both PL0 and PL1 | ||
304 | #endif | ||
305 | |||
306 | /* Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT. */ | ||
307 | #define EX1_PL(ex1) \ | ||
308 | (((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK) | ||
309 | #define EX1_ICS(ex1) \ | ||
310 | (((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK) | ||
311 | #define PL_ICS_EX1(pl, ics) \ | ||
312 | (((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \ | ||
313 | ((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT)) | ||
314 | |||
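A standalone sketch of the pack/unpack round trip the EX1_* macros perform. The field layout used here (PL in the low two bits, ICS in bit 2) is only an assumed stand-in for the real SPR definitions, although the check above does require the PL shift to be zero:

/* Sketch only, not part of the commit; shifts and masks are illustrative
 * stand-ins for the SPR_EX_CONTEXT_1_1__* constants. */
#include <assert.h>

#define PL_SHIFT  0
#define PL_RMASK  3
#define ICS_SHIFT 2
#define ICS_RMASK 1

#define EX1_PL(ex1)         (((ex1) >> PL_SHIFT) & PL_RMASK)
#define EX1_ICS(ex1)        (((ex1) >> ICS_SHIFT) & ICS_RMASK)
#define PL_ICS_EX1(pl, ics) (((pl) << PL_SHIFT) | ((ics) << ICS_SHIFT))

int main(void)
{
        unsigned ex1 = PL_ICS_EX1(1, 1);        /* KERNEL_PL with ICS set */
        assert(EX1_PL(ex1) == 1 && EX1_ICS(ex1) == 1);
        return 0;
}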
315 | /* | ||
316 | * Provide symbolic constants for PLs. | ||
317 | * Note that assembly code assumes that USER_PL is zero. | ||
318 | */ | ||
319 | #define USER_PL 0 | ||
320 | #define KERNEL_PL 1 | ||
321 | |||
322 | /* SYSTEM_SAVE_1_0 holds the current cpu number ORed with ksp0. */ | ||
323 | #define CPU_LOG_MASK_VALUE 12 | ||
324 | #define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1) | ||
325 | #if CONFIG_NR_CPUS > CPU_MASK_VALUE | ||
326 | # error Too many cpus! | ||
327 | #endif | ||
328 | #define raw_smp_processor_id() \ | ||
329 | ((int)__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & CPU_MASK_VALUE) | ||
330 | #define get_current_ksp0() \ | ||
331 | (__insn_mfspr(SPR_SYSTEM_SAVE_1_0) & ~CPU_MASK_VALUE) | ||
332 | #define next_current_ksp0(task) ({ \ | ||
333 | unsigned long __ksp0 = task_ksp0(task); \ | ||
334 | int __cpu = raw_smp_processor_id(); \ | ||
335 | BUG_ON(__ksp0 & CPU_MASK_VALUE); \ | ||
336 | __ksp0 | __cpu; \ | ||
337 | }) | ||
338 | |||
339 | #endif /* _ASM_TILE_PROCESSOR_H */ | ||
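SYSTEM_SAVE_1_0 carries the aligned ksp0 with the cpu number ORed into its low CPU_LOG_MASK_VALUE bits, which is why raw_smp_processor_id() masks with CPU_MASK_VALUE and get_current_ksp0() masks it away. A standalone sketch of that packing, with a hypothetical stack address and cpu number in place of task_ksp0() and the SPR read:

/* Sketch only, not part of the commit; ksp0 and cpu are hypothetical. */
#include <assert.h>

#define CPU_LOG_MASK_VALUE 12
#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1)

int main(void)
{
        unsigned long ksp0 = 0xfd008000UL;      /* low 12 bits must be zero */
        int cpu = 37;

        unsigned long spr = ksp0 | cpu;         /* what next_current_ksp0() builds */
        assert((ksp0 & CPU_MASK_VALUE) == 0);
        assert((int)(spr & CPU_MASK_VALUE) == cpu);               /* raw_smp_processor_id() */
        assert((spr & ~(unsigned long)CPU_MASK_VALUE) == ksp0);   /* get_current_ksp0() */
        return 0;
}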
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h new file mode 100644 index 000000000000..4d1d9953016a --- /dev/null +++ b/arch/tile/include/asm/ptrace.h | |||
@@ -0,0 +1,163 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_PTRACE_H | ||
16 | #define _ASM_TILE_PTRACE_H | ||
17 | |||
18 | #include <arch/chip.h> | ||
19 | #include <arch/abi.h> | ||
20 | |||
21 | /* These must match struct pt_regs, below. */ | ||
22 | #if CHIP_WORD_SIZE() == 32 | ||
23 | #define PTREGS_OFFSET_REG(n) ((n)*4) | ||
24 | #else | ||
25 | #define PTREGS_OFFSET_REG(n) ((n)*8) | ||
26 | #endif | ||
27 | #define PTREGS_OFFSET_BASE 0 | ||
28 | #define PTREGS_OFFSET_TP PTREGS_OFFSET_REG(53) | ||
29 | #define PTREGS_OFFSET_SP PTREGS_OFFSET_REG(54) | ||
30 | #define PTREGS_OFFSET_LR PTREGS_OFFSET_REG(55) | ||
31 | #define PTREGS_NR_GPRS 56 | ||
32 | #define PTREGS_OFFSET_PC PTREGS_OFFSET_REG(56) | ||
33 | #define PTREGS_OFFSET_EX1 PTREGS_OFFSET_REG(57) | ||
34 | #define PTREGS_OFFSET_FAULTNUM PTREGS_OFFSET_REG(58) | ||
35 | #define PTREGS_OFFSET_ORIG_R0 PTREGS_OFFSET_REG(59) | ||
36 | #define PTREGS_OFFSET_FLAGS PTREGS_OFFSET_REG(60) | ||
37 | #if CHIP_HAS_CMPEXCH() | ||
38 | #define PTREGS_OFFSET_CMPEXCH PTREGS_OFFSET_REG(61) | ||
39 | #endif | ||
40 | #define PTREGS_SIZE PTREGS_OFFSET_REG(64) | ||
41 | |||
42 | #ifndef __ASSEMBLY__ | ||
43 | |||
44 | #ifdef __KERNEL__ | ||
45 | /* Benefit from consistent use of "long" on all chips. */ | ||
46 | typedef unsigned long pt_reg_t; | ||
47 | #else | ||
48 | /* Provide appropriate length type to userspace regardless of -m32/-m64. */ | ||
49 | typedef uint_reg_t pt_reg_t; | ||
50 | #endif | ||
51 | |||
52 | /* | ||
53 | * This struct defines the way the registers are stored on the stack during a | ||
54 | * system call/exception. It should be a multiple of 8 bytes to preserve | ||
55 | * normal stack alignment rules. | ||
56 | * | ||
57 | * Must track <sys/ucontext.h> and <sys/procfs.h> | ||
58 | */ | ||
59 | struct pt_regs { | ||
60 | /* Saved main processor registers; 56..63 are special. */ | ||
61 | /* tp, sp, and lr must immediately follow regs[] for aliasing. */ | ||
62 | pt_reg_t regs[53]; | ||
63 | pt_reg_t tp; /* aliases regs[TREG_TP] */ | ||
64 | pt_reg_t sp; /* aliases regs[TREG_SP] */ | ||
65 | pt_reg_t lr; /* aliases regs[TREG_LR] */ | ||
66 | |||
67 | /* Saved special registers. */ | ||
68 | pt_reg_t pc; /* stored in EX_CONTEXT_1_0 */ | ||
69 | pt_reg_t ex1; /* stored in EX_CONTEXT_1_1 (PL and ICS bit) */ | ||
70 | pt_reg_t faultnum; /* fault number (INT_SWINT_1 for syscall) */ | ||
71 | pt_reg_t orig_r0; /* r0 at syscall entry, else zero */ | ||
72 | pt_reg_t flags; /* flags (see below) */ | ||
73 | #if !CHIP_HAS_CMPEXCH() | ||
74 | pt_reg_t pad[3]; | ||
75 | #else | ||
76 | pt_reg_t cmpexch; /* value of CMPEXCH_VALUE SPR at interrupt */ | ||
77 | pt_reg_t pad[2]; | ||
78 | #endif | ||
79 | }; | ||
80 | |||
81 | #endif /* __ASSEMBLY__ */ | ||
82 | |||
83 | /* Flag bits in pt_regs.flags */ | ||
84 | #define PT_FLAGS_DISABLE_IRQ 1 /* on return to kernel, disable irqs */ | ||
85 | #define PT_FLAGS_CALLER_SAVES 2 /* caller-save registers are valid */ | ||
86 | #define PT_FLAGS_RESTORE_REGS 4 /* restore callee-save regs on return */ | ||
87 | |||
88 | #define PTRACE_GETREGS 12 | ||
89 | #define PTRACE_SETREGS 13 | ||
90 | #define PTRACE_GETFPREGS 14 | ||
91 | #define PTRACE_SETFPREGS 15 | ||
92 | |||
93 | /* Support TILE-specific ptrace options, with events starting at 16. */ | ||
94 | #define PTRACE_O_TRACEMIGRATE 0x00010000 | ||
95 | #define PTRACE_EVENT_MIGRATE 16 | ||
96 | #ifdef __KERNEL__ | ||
97 | #define PTRACE_O_MASK_TILE (PTRACE_O_TRACEMIGRATE) | ||
98 | #define PT_TRACE_MIGRATE 0x00080000 | ||
99 | #define PT_TRACE_MASK_TILE (PT_TRACE_MIGRATE) | ||
100 | #endif | ||
101 | |||
102 | #ifdef __KERNEL__ | ||
103 | |||
104 | #ifndef __ASSEMBLY__ | ||
105 | |||
106 | #define instruction_pointer(regs) ((regs)->pc) | ||
107 | #define profile_pc(regs) instruction_pointer(regs) | ||
108 | |||
109 | /* Does the process account for user or for system time? */ | ||
110 | #define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL) | ||
111 | |||
112 | /* Fill in a struct pt_regs with the current kernel registers. */ | ||
113 | struct pt_regs *get_pt_regs(struct pt_regs *); | ||
114 | |||
115 | extern void show_regs(struct pt_regs *); | ||
116 | |||
117 | #define arch_has_single_step() (1) | ||
118 | |||
119 | /* | ||
120 | * A structure for all single-stepper state. | ||
121 | * | ||
122 | * Also update the defines in the assembler section if this structure changes. | ||
123 | */ | ||
124 | struct single_step_state { | ||
125 | /* the page to which we will write hacked-up bundles */ | ||
126 | void *buffer; | ||
127 | |||
128 | union { | ||
129 | int flags; | ||
130 | struct { | ||
131 | unsigned long is_enabled:1, update:1, update_reg:6; | ||
132 | }; | ||
133 | }; | ||
134 | |||
135 | unsigned long orig_pc; /* the original PC */ | ||
136 | unsigned long next_pc; /* return PC if no branch (PC + 1) */ | ||
137 | unsigned long branch_next_pc; /* return PC if we did branch/jump */ | ||
138 | unsigned long update_value; /* value to restore to update_target */ | ||
139 | }; | ||
140 | |||
141 | /* Single-step the instruction at regs->pc */ | ||
142 | extern void single_step_once(struct pt_regs *regs); | ||
143 | |||
144 | struct task_struct; | ||
145 | |||
146 | extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, | ||
147 | int error_code); | ||
148 | |||
149 | #ifdef __tilegx__ | ||
150 | /* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */ | ||
151 | #define __ARCH_WANT_COMPAT_SYS_PTRACE | ||
152 | #endif | ||
153 | |||
154 | #endif /* !__ASSEMBLY__ */ | ||
155 | |||
156 | #define SINGLESTEP_STATE_MASK_IS_ENABLED 0x1 | ||
157 | #define SINGLESTEP_STATE_MASK_UPDATE 0x2 | ||
158 | #define SINGLESTEP_STATE_TARGET_LB 2 | ||
159 | #define SINGLESTEP_STATE_TARGET_UB 7 | ||
160 | |||
161 | #endif /* !__KERNEL__ */ | ||
162 | |||
163 | #endif /* _ASM_TILE_PTRACE_H */ | ||
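The PTREGS_OFFSET_* values above are simply a register number times the word size, so they stay in lockstep with a struct whose first 56 words are the GPRs followed by the pseudo-registers. A standalone sketch for the 32-bit, non-CMPEXCH case (pt_regs_sketch and the 4-byte word size are illustrative assumptions, not the kernel's definitions):

/* Sketch only, not part of the commit. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t pt_reg_t;                      /* assumed 32-bit chip case */

struct pt_regs_sketch {
        pt_reg_t regs[53];
        pt_reg_t tp, sp, lr;                    /* regs 53..55 */
        pt_reg_t pc, ex1, faultnum;             /* pseudo-regs 56..58 */
        pt_reg_t orig_r0, flags;                /* pseudo-regs 59..60 */
        pt_reg_t pad[3];                        /* pads the frame to 64 words */
};

#define PTREGS_OFFSET_REG(n) ((n) * 4)

int main(void)
{
        assert(offsetof(struct pt_regs_sketch, tp) == PTREGS_OFFSET_REG(53));
        assert(offsetof(struct pt_regs_sketch, sp) == PTREGS_OFFSET_REG(54));
        assert(offsetof(struct pt_regs_sketch, pc) == PTREGS_OFFSET_REG(56));
        assert(sizeof(struct pt_regs_sketch) == PTREGS_OFFSET_REG(64));
        return 0;
}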
diff --git a/arch/tile/include/asm/resource.h b/arch/tile/include/asm/resource.h new file mode 100644 index 000000000000..04bc4db8921b --- /dev/null +++ b/arch/tile/include/asm/resource.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/resource.h> | |||
diff --git a/arch/tile/include/asm/scatterlist.h b/arch/tile/include/asm/scatterlist.h new file mode 100644 index 000000000000..c5604242c0d5 --- /dev/null +++ b/arch/tile/include/asm/scatterlist.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SCATTERLIST_H | ||
16 | #define _ASM_TILE_SCATTERLIST_H | ||
17 | |||
18 | #define ISA_DMA_THRESHOLD (~0UL) | ||
19 | |||
20 | #include <asm-generic/scatterlist.h> | ||
21 | |||
22 | #endif /* _ASM_TILE_SCATTERLIST_H */ | ||
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h new file mode 100644 index 000000000000..6c111491f0ed --- /dev/null +++ b/arch/tile/include/asm/sections.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SECTIONS_H | ||
16 | #define _ASM_TILE_SECTIONS_H | ||
17 | |||
18 | #define arch_is_kernel_data arch_is_kernel_data | ||
19 | |||
20 | #include <asm-generic/sections.h> | ||
21 | |||
22 | /* Text and data are in different areas of the kernel VA space. */ | ||
23 | extern char _sinitdata[], _einitdata[]; | ||
24 | |||
25 | /* Write-once data is writable only till the end of initialization. */ | ||
26 | extern char __w1data_begin[], __w1data_end[]; | ||
27 | |||
28 | extern char __feedback_section_start[], __feedback_section_end[]; | ||
29 | |||
30 | /* Handle the discontiguity between _sdata and _stext. */ | ||
31 | static inline int arch_is_kernel_data(unsigned long addr) | ||
32 | { | ||
33 | return addr >= (unsigned long)_sdata && | ||
34 | addr < (unsigned long)_end; | ||
35 | } | ||
36 | |||
37 | #endif /* _ASM_TILE_SECTIONS_H */ | ||
diff --git a/arch/tile/include/asm/sembuf.h b/arch/tile/include/asm/sembuf.h new file mode 100644 index 000000000000..7673b83cfef7 --- /dev/null +++ b/arch/tile/include/asm/sembuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/sembuf.h> | |||
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h new file mode 100644 index 000000000000..823ddd47ff6e --- /dev/null +++ b/arch/tile/include/asm/setup.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SETUP_H | ||
16 | #define _ASM_TILE_SETUP_H | ||
17 | |||
18 | #include <linux/pfn.h> | ||
19 | #include <linux/init.h> | ||
20 | |||
21 | /* | ||
22 | * Reserved space for vmalloc and iomap - defined in asm/page.h | ||
23 | */ | ||
24 | #define MAXMEM_PFN PFN_DOWN(MAXMEM) | ||
25 | |||
26 | #define COMMAND_LINE_SIZE 2048 | ||
27 | |||
28 | void early_panic(const char *fmt, ...); | ||
29 | void warn_early_printk(void); | ||
30 | void __init disable_early_printk(void); | ||
31 | |||
32 | #endif /* _ASM_TILE_SETUP_H */ | ||
diff --git a/arch/tile/include/asm/shmbuf.h b/arch/tile/include/asm/shmbuf.h new file mode 100644 index 000000000000..83c05fc2de38 --- /dev/null +++ b/arch/tile/include/asm/shmbuf.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/shmbuf.h> | |||
diff --git a/arch/tile/include/asm/shmparam.h b/arch/tile/include/asm/shmparam.h new file mode 100644 index 000000000000..93f30deb95d0 --- /dev/null +++ b/arch/tile/include/asm/shmparam.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/shmparam.h> | |||
diff --git a/arch/tile/include/asm/sigcontext.h b/arch/tile/include/asm/sigcontext.h new file mode 100644 index 000000000000..7cd7672e3ad4 --- /dev/null +++ b/arch/tile/include/asm/sigcontext.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGCONTEXT_H | ||
16 | #define _ASM_TILE_SIGCONTEXT_H | ||
17 | |||
18 | /* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ | ||
19 | #include <asm/ptrace.h> | ||
20 | |||
21 | /* Must track <sys/ucontext.h> */ | ||
22 | |||
23 | struct sigcontext { | ||
24 | struct pt_regs regs; | ||
25 | }; | ||
26 | |||
27 | #endif /* _ASM_TILE_SIGCONTEXT_H */ | ||
diff --git a/arch/tile/include/asm/sigframe.h b/arch/tile/include/asm/sigframe.h new file mode 100644 index 000000000000..994d3d30205f --- /dev/null +++ b/arch/tile/include/asm/sigframe.h | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGFRAME_H | ||
16 | #define _ASM_TILE_SIGFRAME_H | ||
17 | |||
18 | /* Indicate that syscall return should not examine r0 */ | ||
19 | #define INT_SWINT_1_SIGRETURN (~0) | ||
20 | |||
21 | #ifndef __ASSEMBLY__ | ||
22 | |||
23 | #include <arch/abi.h> | ||
24 | |||
25 | struct rt_sigframe { | ||
26 | unsigned char save_area[C_ABI_SAVE_AREA_SIZE]; /* caller save area */ | ||
27 | struct siginfo info; | ||
28 | struct ucontext uc; | ||
29 | }; | ||
30 | |||
31 | #endif /* !__ASSEMBLY__ */ | ||
32 | |||
33 | #endif /* _ASM_TILE_SIGFRAME_H */ | ||
diff --git a/arch/tile/include/asm/siginfo.h b/arch/tile/include/asm/siginfo.h new file mode 100644 index 000000000000..0c12d1b9ddf2 --- /dev/null +++ b/arch/tile/include/asm/siginfo.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGINFO_H | ||
16 | #define _ASM_TILE_SIGINFO_H | ||
17 | |||
18 | #define __ARCH_SI_TRAPNO | ||
19 | |||
20 | #include <asm-generic/siginfo.h> | ||
21 | |||
22 | /* | ||
23 | * Additional Tile-specific SIGILL si_codes | ||
24 | */ | ||
25 | #define ILL_DBLFLT (__SI_FAULT|9) /* double fault */ | ||
26 | #define ILL_HARDWALL (__SI_FAULT|10) /* user networks hardwall violation */ | ||
27 | #undef NSIGILL | ||
28 | #define NSIGILL 10 | ||
29 | |||
30 | #endif /* _ASM_TILE_SIGINFO_H */ | ||
diff --git a/arch/tile/include/asm/signal.h b/arch/tile/include/asm/signal.h new file mode 100644 index 000000000000..d20d326d201b --- /dev/null +++ b/arch/tile/include/asm/signal.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SIGNAL_H | ||
16 | #define _ASM_TILE_SIGNAL_H | ||
17 | |||
18 | /* Do not notify a ptracer when this signal is handled. */ | ||
19 | #define SA_NOPTRACE 0x02000000u | ||
20 | |||
21 | /* Used in earlier Tilera releases, so kept for binary compatibility. */ | ||
22 | #define SA_RESTORER 0x04000000u | ||
23 | |||
24 | #include <asm-generic/signal.h> | ||
25 | |||
26 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | ||
27 | int restore_sigcontext(struct pt_regs *, struct sigcontext __user *, long *); | ||
28 | int setup_sigcontext(struct sigcontext __user *, struct pt_regs *); | ||
29 | #endif | ||
30 | |||
31 | #endif /* _ASM_TILE_SIGNAL_H */ | ||
diff --git a/arch/tile/include/asm/smp.h b/arch/tile/include/asm/smp.h new file mode 100644 index 000000000000..da24858a7392 --- /dev/null +++ b/arch/tile/include/asm/smp.h | |||
@@ -0,0 +1,126 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SMP_H | ||
16 | #define _ASM_TILE_SMP_H | ||
17 | |||
18 | #ifdef CONFIG_SMP | ||
19 | |||
20 | #include <asm/processor.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | #include <linux/irqreturn.h> | ||
23 | |||
24 | /* Set up this tile to support receiving hypervisor messages */ | ||
25 | void init_messaging(void); | ||
26 | |||
27 | /* Set up this tile to support receiving device interrupts and IPIs. */ | ||
28 | void init_per_tile_IRQs(void); | ||
29 | |||
30 | /* Send a message to processors specified in mask */ | ||
31 | void send_IPI_many(const struct cpumask *mask, int tag); | ||
32 | |||
33 | /* Send a message to all but the sending processor */ | ||
34 | void send_IPI_allbutself(int tag); | ||
35 | |||
36 | /* Send a message to a specific processor */ | ||
37 | void send_IPI_single(int dest, int tag); | ||
38 | |||
39 | /* Process an IPI message */ | ||
40 | void evaluate_message(int tag); | ||
41 | |||
42 | /* Process an IRQ_RESCHEDULE IPI. */ | ||
43 | irqreturn_t handle_reschedule_ipi(int irq, void *token); | ||
44 | |||
45 | /* Boot a secondary cpu */ | ||
46 | void online_secondary(void); | ||
47 | |||
48 | /* Call a function on a specified set of CPUs (may include this one). */ | ||
49 | extern void on_each_cpu_mask(const struct cpumask *mask, | ||
50 | void (*func)(void *), void *info, bool wait); | ||
51 | |||
52 | /* Topology of the supervisor tile grid, and coordinates of boot processor */ | ||
53 | extern HV_Topology smp_topology; | ||
54 | |||
55 | /* Accessors for grid size */ | ||
56 | #define smp_height (smp_topology.height) | ||
57 | #define smp_width (smp_topology.width) | ||
58 | |||
59 | /* Hypervisor message tags sent via the tile send_IPI*() routines. */ | ||
60 | #define MSG_TAG_START_CPU 1 | ||
61 | #define MSG_TAG_STOP_CPU 2 | ||
62 | #define MSG_TAG_CALL_FUNCTION_MANY 3 | ||
63 | #define MSG_TAG_CALL_FUNCTION_SINGLE 4 | ||
64 | |||
65 | /* Hook for the generic smp_call_function_many() routine. */ | ||
66 | static inline void arch_send_call_function_ipi_mask(struct cpumask *mask) | ||
67 | { | ||
68 | send_IPI_many(mask, MSG_TAG_CALL_FUNCTION_MANY); | ||
69 | } | ||
70 | |||
71 | /* Hook for the generic smp_call_function_single() routine. */ | ||
72 | static inline void arch_send_call_function_single_ipi(int cpu) | ||
73 | { | ||
74 | send_IPI_single(cpu, MSG_TAG_CALL_FUNCTION_SINGLE); | ||
75 | } | ||
76 | |||
77 | /* Print out the boot string describing which cpus were disabled. */ | ||
78 | void print_disabled_cpus(void); | ||
79 | |||
80 | #else /* !CONFIG_SMP */ | ||
81 | |||
82 | #define on_each_cpu_mask(mask, func, info, wait) \ | ||
83 | do { if (cpumask_test_cpu(0, (mask))) func(info); } while (0) | ||
84 | |||
85 | #define smp_master_cpu 0 | ||
86 | #define smp_height 1 | ||
87 | #define smp_width 1 | ||
88 | |||
89 | #endif /* !CONFIG_SMP */ | ||
90 | |||
91 | |||
92 | /* Which cpus may be used as the lotar in a page table entry. */ | ||
93 | extern struct cpumask cpu_lotar_map; | ||
94 | #define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map) | ||
95 | |||
96 | #if CHIP_HAS_CBOX_HOME_MAP() | ||
97 | /* Which processors are used for hash-for-home mapping */ | ||
98 | extern struct cpumask hash_for_home_map; | ||
99 | #endif | ||
100 | |||
101 | /* Which cpus can have their cache flushed by hv_flush_remote(). */ | ||
102 | extern struct cpumask cpu_cacheable_map; | ||
103 | #define cpu_cacheable(cpu) cpumask_test_cpu((cpu), &cpu_cacheable_map) | ||
104 | |||
105 | /* Convert an HV_LOTAR value into a cpu. */ | ||
106 | static inline int hv_lotar_to_cpu(HV_LOTAR lotar) | ||
107 | { | ||
108 | return HV_LOTAR_X(lotar) + (HV_LOTAR_Y(lotar) * smp_width); | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Extension of <linux/cpumask.h> functionality when you just want | ||
113 | * to express a mask or suppression or inclusion region without | ||
114 | * being too concerned about exactly which cpus are valid in that region. | ||
115 | */ | ||
116 | int bitmap_parselist_crop(const char *bp, unsigned long *maskp, int nmaskbits); | ||
117 | |||
118 | #define cpulist_parse_crop(buf, dst) \ | ||
119 | __cpulist_parse_crop((buf), (dst), NR_CPUS) | ||
120 | static inline int __cpulist_parse_crop(const char *buf, struct cpumask *dstp, | ||
121 | int nbits) | ||
122 | { | ||
123 | return bitmap_parselist_crop(buf, cpumask_bits(dstp), nbits); | ||
124 | } | ||
125 | |||
126 | #endif /* _ASM_TILE_SMP_H */ | ||
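
A quick aside on hv_lotar_to_cpu() above: it linearizes an (x, y) tile coordinate into a logical cpu number with the usual row-major stride of smp_width. The standalone sketch below is an illustration only; the 8x8 grid and the helper names are made up and are not part of this header.

/* Standalone illustration of the row-major coordinate mapping used by
 * hv_lotar_to_cpu(); the 8x8 grid is a made-up stand-in for
 * smp_topology.width/height. */
#include <assert.h>
#include <stdio.h>

static const int grid_width = 8;            /* stand-in for smp_width */

static int coords_to_cpu(int x, int y)
{
    return x + y * grid_width;              /* x + y * smp_width */
}

static void cpu_to_coords(int cpu, int *x, int *y)
{
    *x = cpu % grid_width;
    *y = cpu / grid_width;
}

int main(void)
{
    int x, y, cpu = coords_to_cpu(3, 5);
    cpu_to_coords(cpu, &x, &y);
    assert(x == 3 && y == 5);
    printf("tile (3,5) -> cpu %d -> tile (%d,%d)\n", cpu, x, y);
    return 0;
}
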
diff --git a/arch/tile/include/asm/socket.h b/arch/tile/include/asm/socket.h new file mode 100644 index 000000000000..6b71384b9d8b --- /dev/null +++ b/arch/tile/include/asm/socket.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/socket.h> | |||
diff --git a/arch/tile/include/asm/sockios.h b/arch/tile/include/asm/sockios.h new file mode 100644 index 000000000000..def6d4746ee7 --- /dev/null +++ b/arch/tile/include/asm/sockios.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/sockios.h> | |||
diff --git a/arch/tile/include/asm/spinlock.h b/arch/tile/include/asm/spinlock.h new file mode 100644 index 000000000000..1a8bd4740c28 --- /dev/null +++ b/arch/tile/include/asm/spinlock.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SPINLOCK_H | ||
16 | #define _ASM_TILE_SPINLOCK_H | ||
17 | |||
18 | #ifdef __tilegx__ | ||
19 | #include <asm/spinlock_64.h> | ||
20 | #else | ||
21 | #include <asm/spinlock_32.h> | ||
22 | #endif | ||
23 | |||
24 | #endif /* _ASM_TILE_SPINLOCK_H */ | ||
diff --git a/arch/tile/include/asm/spinlock_32.h b/arch/tile/include/asm/spinlock_32.h new file mode 100644 index 000000000000..f3a8473c68da --- /dev/null +++ b/arch/tile/include/asm/spinlock_32.h | |||
@@ -0,0 +1,200 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * 32-bit SMP spinlocks. | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_TILE_SPINLOCK_32_H | ||
18 | #define _ASM_TILE_SPINLOCK_32_H | ||
19 | |||
20 | #include <asm/atomic.h> | ||
21 | #include <asm/page.h> | ||
22 | #include <asm/system.h> | ||
23 | #include <linux/compiler.h> | ||
24 | |||
25 | /* | ||
26 | * We only use even ticket numbers so the '1' inserted by a tns is | ||
27 | * an unambiguous "ticket is busy" flag. | ||
28 | */ | ||
29 | #define TICKET_QUANTUM 2 | ||
30 | |||
31 | |||
32 | /* | ||
33 | * SMP ticket spinlocks, allowing only a single CPU anywhere | ||
34 | * | ||
35 | * (the type definitions are in asm/spinlock_types.h) | ||
36 | */ | ||
37 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) | ||
38 | { | ||
39 | /* | ||
40 | * Note that even if a new ticket is in the process of being | ||
41 | * acquired, so lock->next_ticket is 1, it's still reasonable | ||
42 | * to claim the lock is held, since it will be held momentarily | ||
43 | * if not already. There's no need to wait for a "valid" | ||
44 | * lock->next_ticket to become available. | ||
45 | */ | ||
46 | return lock->next_ticket != lock->current_ticket; | ||
47 | } | ||
48 | |||
49 | void arch_spin_lock(arch_spinlock_t *lock); | ||
50 | |||
51 | /* We cannot take an interrupt after getting a ticket, so don't enable them. */ | ||
52 | #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) | ||
53 | |||
54 | int arch_spin_trylock(arch_spinlock_t *lock); | ||
55 | |||
56 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | ||
57 | { | ||
58 | /* For efficiency, overlap fetching the old ticket with the wmb(). */ | ||
59 | int old_ticket = lock->current_ticket; | ||
60 | wmb(); /* guarantee anything modified under the lock is visible */ | ||
61 | lock->current_ticket = old_ticket + TICKET_QUANTUM; | ||
62 | } | ||
63 | |||
64 | void arch_spin_unlock_wait(arch_spinlock_t *lock); | ||
65 | |||
66 | /* | ||
67 | * Read-write spinlocks, allowing multiple readers | ||
68 | * but only one writer. | ||
69 | * | ||
70 | * We use a "tns/store-back" technique on a single word to manage | ||
71 | * the lock state, looping around to retry if the tns returns 1. | ||
72 | */ | ||
73 | |||
74 | /* Internal layout of the word; do not use. */ | ||
75 | #define _WR_NEXT_SHIFT 8 | ||
76 | #define _WR_CURR_SHIFT 16 | ||
77 | #define _WR_WIDTH 8 | ||
78 | #define _RD_COUNT_SHIFT 24 | ||
79 | #define _RD_COUNT_WIDTH 8 | ||
80 | |||
81 | /* Internal functions; do not use. */ | ||
82 | void arch_read_lock_slow(arch_rwlock_t *, u32); | ||
83 | int arch_read_trylock_slow(arch_rwlock_t *); | ||
84 | void arch_read_unlock_slow(arch_rwlock_t *); | ||
85 | void arch_write_lock_slow(arch_rwlock_t *, u32); | ||
86 | void arch_write_unlock_slow(arch_rwlock_t *, u32); | ||
87 | |||
88 | /** | ||
89 | * arch_read_can_lock() - would read_trylock() succeed? | ||
90 | */ | ||
91 | static inline int arch_read_can_lock(arch_rwlock_t *rwlock) | ||
92 | { | ||
93 | return (rwlock->lock << _RD_COUNT_WIDTH) == 0; | ||
94 | } | ||
95 | |||
96 | /** | ||
97 | * arch_write_can_lock() - would write_trylock() succeed? | ||
98 | */ | ||
99 | static inline int arch_write_can_lock(arch_rwlock_t *rwlock) | ||
100 | { | ||
101 | return rwlock->lock == 0; | ||
102 | } | ||
103 | |||
104 | /** | ||
105 | * arch_read_lock() - acquire a read lock. | ||
106 | */ | ||
107 | static inline void arch_read_lock(arch_rwlock_t *rwlock) | ||
108 | { | ||
109 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
110 | if (unlikely(val << _RD_COUNT_WIDTH)) { | ||
111 | arch_read_lock_slow(rwlock, val); | ||
112 | return; | ||
113 | } | ||
114 | rwlock->lock = val + (1 << _RD_COUNT_SHIFT); | ||
115 | } | ||
116 | |||
117 | /** | ||
118 | * arch_write_lock() - acquire a write lock. | ||
119 | */ | ||
120 | static inline void arch_write_lock(arch_rwlock_t *rwlock) | ||
121 | { | ||
122 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
123 | if (unlikely(val != 0)) { | ||
124 | arch_write_lock_slow(rwlock, val); | ||
125 | return; | ||
126 | } | ||
127 | rwlock->lock = 1 << _WR_NEXT_SHIFT; | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * arch_read_trylock() - try to acquire a read lock. | ||
132 | */ | ||
133 | static inline int arch_read_trylock(arch_rwlock_t *rwlock) | ||
134 | { | ||
135 | int locked; | ||
136 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
137 | if (unlikely(val & 1)) { | ||
138 | return arch_read_trylock_slow(rwlock); | ||
139 | } | ||
140 | locked = (val << _RD_COUNT_WIDTH) == 0; | ||
141 | rwlock->lock = val + (locked << _RD_COUNT_SHIFT); | ||
142 | return locked; | ||
143 | } | ||
144 | |||
145 | /** | ||
146 | * arch_write_trylock() - try to acquire a write lock. | ||
147 | */ | ||
148 | static inline int arch_write_trylock(arch_rwlock_t *rwlock) | ||
149 | { | ||
150 | u32 val = __insn_tns((int *)&rwlock->lock); | ||
151 | |||
152 | /* | ||
153 | * If a tns is in progress, or there's a waiting or active locker, | ||
154 | * or active readers, we can't take the lock, so give up. | ||
155 | */ | ||
156 | if (unlikely(val != 0)) { | ||
157 | if (!(val & 1)) | ||
158 | rwlock->lock = val; | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | /* Set the "next" field to mark it locked. */ | ||
163 | rwlock->lock = 1 << _WR_NEXT_SHIFT; | ||
164 | return 1; | ||
165 | } | ||
166 | |||
167 | /** | ||
168 | * arch_read_unlock() - release a read lock. | ||
169 | */ | ||
170 | static inline void arch_read_unlock(arch_rwlock_t *rwlock) | ||
171 | { | ||
172 | u32 val; | ||
173 | mb(); /* guarantee anything modified under the lock is visible */ | ||
174 | val = __insn_tns((int *)&rwlock->lock); | ||
175 | if (unlikely(val & 1)) { | ||
176 | arch_read_unlock_slow(rwlock); | ||
177 | return; | ||
178 | } | ||
179 | rwlock->lock = val - (1 << _RD_COUNT_SHIFT); | ||
180 | } | ||
181 | |||
182 | /** | ||
183 | * arch_write_unlock() - release a write lock. | ||
184 | */ | ||
185 | static inline void arch_write_unlock(arch_rwlock_t *rwlock) | ||
186 | { | ||
187 | u32 val; | ||
188 | mb(); /* guarantee anything modified under the lock is visible */ | ||
189 | val = __insn_tns((int *)&rwlock->lock); | ||
190 | if (unlikely(val != (1 << _WR_NEXT_SHIFT))) { | ||
191 | arch_write_unlock_slow(rwlock, val); | ||
192 | return; | ||
193 | } | ||
194 | rwlock->lock = 0; | ||
195 | } | ||
196 | |||
197 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | ||
198 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | ||
199 | |||
200 | #endif /* _ASM_TILE_SPINLOCK_32_H */ | ||
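
An aside on the even-ticket convention above: valid tickets advance in steps of TICKET_QUANTUM (2), so the transient '1' that a tns instruction stores into the lock word while a CPU is mid-acquire can never be confused with a real ticket. The toy model below is plain user-space C with made-up names and no atomicity or spinning; it only illustrates the ticket arithmetic, not the real tns-based arch_spin_lock()/arch_spin_unlock().

/* Toy model of the even-ticket arithmetic only; not the kernel code.
 * No atomicity or spinning is modelled here. */
#include <stdio.h>

#define TICKET_QUANTUM 2

struct toy_spinlock {
    int next_ticket;            /* next ticket to hand out */
    int current_ticket;         /* ticket currently being served */
};

static int toy_is_locked(const struct toy_spinlock *l)
{
    return l->next_ticket != l->current_ticket;
}

static void toy_lock(struct toy_spinlock *l)
{
    /* The real code grabs next_ticket with tns and then spins until
     * current_ticket reaches the ticket it took. */
    l->next_ticket += TICKET_QUANTUM;
}

static void toy_unlock(struct toy_spinlock *l)
{
    l->current_ticket += TICKET_QUANTUM;    /* serve the next waiter */
}

int main(void)
{
    struct toy_spinlock l = { 0, 0 };
    toy_lock(&l);
    printf("after lock:   locked=%d\n", toy_is_locked(&l));
    toy_unlock(&l);
    printf("after unlock: locked=%d\n", toy_is_locked(&l));
    return 0;
}
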
diff --git a/arch/tile/include/asm/spinlock_types.h b/arch/tile/include/asm/spinlock_types.h new file mode 100644 index 000000000000..a71f59b49c50 --- /dev/null +++ b/arch/tile/include/asm/spinlock_types.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SPINLOCK_TYPES_H | ||
16 | #define _ASM_TILE_SPINLOCK_TYPES_H | ||
17 | |||
18 | #ifndef __LINUX_SPINLOCK_TYPES_H | ||
19 | # error "please don't include this file directly" | ||
20 | #endif | ||
21 | |||
22 | #ifdef __tilegx__ | ||
23 | |||
24 | /* Low 15 bits are "next"; high 15 bits are "current". */ | ||
25 | typedef struct arch_spinlock { | ||
26 | unsigned int lock; | ||
27 | } arch_spinlock_t; | ||
28 | |||
29 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } | ||
30 | |||
31 | /* High bit is "writer owns"; low 31 bits are a count of readers. */ | ||
32 | typedef struct arch_rwlock { | ||
33 | unsigned int lock; | ||
34 | } arch_rwlock_t; | ||
35 | |||
36 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
37 | |||
38 | #else | ||
39 | |||
40 | typedef struct arch_spinlock { | ||
41 | /* Next ticket number to hand out. */ | ||
42 | int next_ticket; | ||
43 | /* The ticket number that currently owns this lock. */ | ||
44 | int current_ticket; | ||
45 | } arch_spinlock_t; | ||
46 | |||
47 | #define __ARCH_SPIN_LOCK_UNLOCKED { 0, 0 } | ||
48 | |||
49 | /* | ||
50 | * Byte 0 for tns (only the low bit is used), byte 1 for ticket-lock "next", | ||
51 | * byte 2 for ticket-lock "current", byte 3 for reader count. | ||
52 | */ | ||
53 | typedef struct arch_rwlock { | ||
54 | unsigned int lock; | ||
55 | } arch_rwlock_t; | ||
56 | |||
57 | #define __ARCH_RW_LOCK_UNLOCKED { 0 } | ||
58 | |||
59 | #endif | ||
60 | #endif /* _ASM_TILE_SPINLOCK_TYPES_H */ | ||
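
For the 32-bit chips, the rwlock word above packs four byte-wide fields: the tns byte, the writer "next" and "current" tickets, and the reader count. The standalone sketch below (illustration only; the field values are made up) decodes such a word using the same shift constants that spinlock_32.h defines.

/* Standalone sketch: decoding the packed 32-bit rwlock word described
 * above, using the field shifts from spinlock_32.h.  Illustration only. */
#include <stdint.h>
#include <stdio.h>

#define _WR_NEXT_SHIFT   8
#define _WR_CURR_SHIFT  16
#define _RD_COUNT_SHIFT 24

int main(void)
{
    uint32_t lock = (3u << _RD_COUNT_SHIFT) |   /* three active readers */
                    (4u << _WR_CURR_SHIFT)  |   /* writer ticket being served */
                    (6u << _WR_NEXT_SHIFT);     /* next writer ticket to hand out */

    printf("tns byte:    %u\n", lock & 0xff);
    printf("writer next: %u\n", (lock >> _WR_NEXT_SHIFT) & 0xff);
    printf("writer curr: %u\n", (lock >> _WR_CURR_SHIFT) & 0xff);
    printf("readers:     %u\n", (lock >> _RD_COUNT_SHIFT) & 0xff);
    return 0;
}
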
diff --git a/arch/tile/include/asm/stack.h b/arch/tile/include/asm/stack.h new file mode 100644 index 000000000000..864913bcfbc9 --- /dev/null +++ b/arch/tile/include/asm/stack.h | |||
@@ -0,0 +1,68 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_STACK_H | ||
16 | #define _ASM_TILE_STACK_H | ||
17 | |||
18 | #include <linux/types.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <asm/backtrace.h> | ||
21 | #include <hv/hypervisor.h> | ||
22 | |||
23 | /* Everything we need to keep track of a backtrace iteration */ | ||
24 | struct KBacktraceIterator { | ||
25 | BacktraceIterator it; | ||
26 | struct task_struct *task; /* task we are backtracing */ | ||
27 | HV_PTE *pgtable; /* page table for user space access */ | ||
28 | int end; /* iteration complete. */ | ||
29 | int new_context; /* new context is starting */ | ||
30 | int profile; /* profiling, so stop on async intrpt */ | ||
31 | int verbose; /* printk extra info (don't want to | ||
32 | * do this for profiling) */ | ||
33 | int is_current; /* backtracing current task */ | ||
34 | }; | ||
35 | |||
36 | /* Iteration methods for kernel backtraces */ | ||
37 | |||
38 | /* | ||
39 | * Initialize a KBacktraceIterator from a task_struct, and optionally from | ||
40 | * a set of registers. If the registers are omitted, the process is | ||
41 | * assumed to be descheduled, and registers are read from the process's | ||
42 | * thread_struct and stack. "verbose" means to printk some additional | ||
43 | * information about fault handlers as we pass them on the stack. | ||
44 | */ | ||
45 | extern void KBacktraceIterator_init(struct KBacktraceIterator *kbt, | ||
46 | struct task_struct *, struct pt_regs *); | ||
47 | |||
48 | /* Initialize iterator based on current stack. */ | ||
49 | extern void KBacktraceIterator_init_current(struct KBacktraceIterator *kbt); | ||
50 | |||
51 | /* No more frames? */ | ||
52 | extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt); | ||
53 | |||
54 | /* Advance to the next frame. */ | ||
55 | extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt); | ||
56 | |||
57 | /* | ||
58 | * Dump stack given complete register info. Use only from the | ||
59 | * architecture-specific code; show_stack() | ||
60 | * and dump_stack() (in entry.S) are architecture-independent entry points. | ||
61 | */ | ||
62 | extern void tile_show_stack(struct KBacktraceIterator *, int headers); | ||
63 | |||
64 | /* Dump stack of current process, with registers to seed the backtrace. */ | ||
65 | extern void dump_stack_regs(struct pt_regs *); | ||
66 | |||
67 | |||
68 | #endif /* _ASM_TILE_STACK_H */ | ||
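
The iterator above is designed for the usual init/end/next pattern. A hypothetical usage sketch follows (not part of this commit, and only buildable inside the kernel tree); the function name is made up.

/* Hypothetical usage sketch of the iterator above; not from this commit. */
#include <asm/stack.h>

static void example_walk_current_stack(void)
{
    struct KBacktraceIterator kbt;

    KBacktraceIterator_init_current(&kbt);
    for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
        /* Each step exposes one frame via kbt.it (see asm/backtrace.h). */
    }
}
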
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h new file mode 100644 index 000000000000..3dc90fa92c70 --- /dev/null +++ b/arch/tile/include/asm/stat.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/stat.h> | |||
diff --git a/arch/tile/include/asm/statfs.h b/arch/tile/include/asm/statfs.h new file mode 100644 index 000000000000..0b91fe198c20 --- /dev/null +++ b/arch/tile/include/asm/statfs.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/statfs.h> | |||
diff --git a/arch/tile/include/asm/string.h b/arch/tile/include/asm/string.h new file mode 100644 index 000000000000..7535cf1a30e4 --- /dev/null +++ b/arch/tile/include/asm/string.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_STRING_H | ||
16 | #define _ASM_TILE_STRING_H | ||
17 | |||
18 | #define __HAVE_ARCH_MEMCHR | ||
19 | #define __HAVE_ARCH_MEMSET | ||
20 | #define __HAVE_ARCH_MEMCPY | ||
21 | #define __HAVE_ARCH_MEMMOVE | ||
22 | #define __HAVE_ARCH_STRCHR | ||
23 | #define __HAVE_ARCH_STRLEN | ||
24 | |||
25 | extern __kernel_size_t strlen(const char *); | ||
26 | extern char *strchr(const char *s, int c); | ||
27 | extern void *memchr(const void *s, int c, size_t n); | ||
28 | extern void *memset(void *, int, __kernel_size_t); | ||
29 | extern void *memcpy(void *, const void *, __kernel_size_t); | ||
30 | extern void *memmove(void *, const void *, __kernel_size_t); | ||
31 | |||
32 | #endif /* _ASM_TILE_STRING_H */ | ||
diff --git a/arch/tile/include/asm/swab.h b/arch/tile/include/asm/swab.h new file mode 100644 index 000000000000..25c686a00f1d --- /dev/null +++ b/arch/tile/include/asm/swab.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SWAB_H | ||
16 | #define _ASM_TILE_SWAB_H | ||
17 | |||
18 | /* Tile gcc is always >= 4.3.0, so we use __builtin_bswap. */ | ||
19 | #define __arch_swab32(x) __builtin_bswap32(x) | ||
20 | #define __arch_swab64(x) __builtin_bswap64(x) | ||
21 | |||
22 | /* Use the variant that is natural for the wordsize. */ | ||
23 | #ifdef CONFIG_64BIT | ||
24 | #define __arch_swab16(x) (__builtin_bswap64(x) >> 48) | ||
25 | #else | ||
26 | #define __arch_swab16(x) (__builtin_bswap32(x) >> 16) | ||
27 | #endif | ||
28 | |||
29 | #endif /* _ASM_TILE_SWAB_H */ | ||
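
The 16-bit swab above leans on the wider builtins: byte-swapping the zero-extended value and shifting the result right leaves just the two interesting bytes, swapped. The standalone check below (illustration only; builds with any gcc >= 4.3 or clang) verifies the 32-bit variant of the trick.

/* Standalone check of the swab16-via-bswap32 trick; illustration only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t swab16_via_bswap32(uint16_t x)
{
    /* 0x00001234 -> bswap32 -> 0x34120000 -> >>16 -> 0x3412 */
    return (uint16_t)(__builtin_bswap32(x) >> 16);
}

int main(void)
{
    assert(swab16_via_bswap32(0x1234) == 0x3412);
    printf("0x1234 -> 0x%04x\n", swab16_via_bswap32(0x1234));
    return 0;
}
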
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h new file mode 100644 index 000000000000..d35e0dcb67b1 --- /dev/null +++ b/arch/tile/include/asm/syscall.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * See asm-generic/syscall.h for descriptions of what we must do here. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_TILE_SYSCALL_H | ||
19 | #define _ASM_TILE_SYSCALL_H | ||
20 | |||
21 | #include <linux/sched.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <arch/abi.h> | ||
24 | |||
25 | /* | ||
26 | * Only the low 32 bits of orig_r0 are meaningful, so we return int. | ||
27 | * This importantly ignores the high bits on 64-bit, so comparisons | ||
28 | * sign-extend the low 32 bits. | ||
29 | */ | ||
30 | static inline int syscall_get_nr(struct task_struct *t, struct pt_regs *regs) | ||
31 | { | ||
32 | return regs->regs[TREG_SYSCALL_NR]; | ||
33 | } | ||
34 | |||
35 | static inline void syscall_rollback(struct task_struct *task, | ||
36 | struct pt_regs *regs) | ||
37 | { | ||
38 | regs->regs[0] = regs->orig_r0; | ||
39 | } | ||
40 | |||
41 | static inline long syscall_get_error(struct task_struct *task, | ||
42 | struct pt_regs *regs) | ||
43 | { | ||
44 | unsigned long error = regs->regs[0]; | ||
45 | return IS_ERR_VALUE(error) ? error : 0; | ||
46 | } | ||
47 | |||
48 | static inline long syscall_get_return_value(struct task_struct *task, | ||
49 | struct pt_regs *regs) | ||
50 | { | ||
51 | return regs->regs[0]; | ||
52 | } | ||
53 | |||
54 | static inline void syscall_set_return_value(struct task_struct *task, | ||
55 | struct pt_regs *regs, | ||
56 | int error, long val) | ||
57 | { | ||
58 | regs->regs[0] = (long) error ?: val; | ||
59 | } | ||
60 | |||
61 | static inline void syscall_get_arguments(struct task_struct *task, | ||
62 | struct pt_regs *regs, | ||
63 | unsigned int i, unsigned int n, | ||
64 | unsigned long *args) | ||
65 | { | ||
66 | BUG_ON(i + n > 6); | ||
67 | memcpy(args, &regs->regs[i], n * sizeof(args[0])); | ||
68 | } | ||
69 | |||
70 | static inline void syscall_set_arguments(struct task_struct *task, | ||
71 | struct pt_regs *regs, | ||
72 | unsigned int i, unsigned int n, | ||
73 | const unsigned long *args) | ||
74 | { | ||
75 | BUG_ON(i + n > 6); | ||
76 | memcpy(®s[i], args, n * sizeof(args[0])); | ||
77 | } | ||
78 | |||
79 | #endif /* _ASM_TILE_SYSCALL_H */ | ||
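
syscall_get_error() above relies on the standard Linux convention that a syscall leaves either its result or a small negative errno in the return register (r0 here), with errnos occupying the top 4095 values of the address space, which is what IS_ERR_VALUE() tests. A standalone sketch of that convention (illustration only; the sample values are made up):

/* Standalone sketch of the negative-errno return convention that
 * syscall_get_error()/IS_ERR_VALUE() rely on.  Illustration only. */
#include <stdio.h>

#define MAX_ERRNO 4095UL

static int is_err_value(unsigned long v)
{
    return v >= (unsigned long)-MAX_ERRNO;   /* last 4095 values are errnos */
}

int main(void)
{
    unsigned long ok  = 42;                  /* e.g. a byte count */
    unsigned long bad = (unsigned long)-22;  /* -EINVAL */

    printf("ok:  error=%ld\n", is_err_value(ok)  ? (long)ok  : 0L);
    printf("bad: error=%ld\n", is_err_value(bad) ? (long)bad : 0L);
    return 0;
}
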
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h new file mode 100644 index 000000000000..e1be54d1a7d8 --- /dev/null +++ b/arch/tile/include/asm/syscalls.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * syscalls.h - Linux syscall interfaces (arch-specific) | ||
3 | * | ||
4 | * Copyright (c) 2008 Jaswinder Singh Rajput | ||
5 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation, version 2. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
14 | * NON INFRINGEMENT. See the GNU General Public License for | ||
15 | * more details. | ||
16 | */ | ||
17 | |||
18 | #ifndef _ASM_TILE_SYSCALLS_H | ||
19 | #define _ASM_TILE_SYSCALLS_H | ||
20 | |||
21 | #include <linux/compiler.h> | ||
22 | #include <linux/linkage.h> | ||
23 | #include <linux/signal.h> | ||
24 | #include <linux/types.h> | ||
25 | |||
26 | /* kernel/process.c */ | ||
27 | int sys_fork(struct pt_regs *); | ||
28 | int sys_vfork(struct pt_regs *); | ||
29 | int sys_clone(unsigned long clone_flags, unsigned long newsp, | ||
30 | int __user *parent_tidptr, int __user *child_tidptr, | ||
31 | struct pt_regs *); | ||
32 | int sys_execve(char __user *path, char __user *__user *argv, | ||
33 | char __user *__user *envp, struct pt_regs *); | ||
34 | |||
35 | /* kernel/signal.c */ | ||
36 | int sys_sigaltstack(const stack_t __user *, stack_t __user *, | ||
37 | struct pt_regs *); | ||
38 | long sys_rt_sigreturn(struct pt_regs *); | ||
39 | int sys_raise_fpe(int code, unsigned long addr, struct pt_regs*); | ||
40 | |||
41 | /* kernel/sys.c */ | ||
42 | ssize_t sys32_readahead(int fd, u32 offset_lo, u32 offset_hi, u32 count); | ||
43 | long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi, | ||
44 | u32 len, int advice); | ||
45 | int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, | ||
46 | u32 len_lo, u32 len_hi, int advice); | ||
47 | long sys_flush_cache(void); | ||
48 | long sys_mmap(unsigned long addr, unsigned long len, | ||
49 | unsigned long prot, unsigned long flags, | ||
50 | unsigned long fd, unsigned long offset); | ||
51 | long sys_mmap2(unsigned long addr, unsigned long len, | ||
52 | unsigned long prot, unsigned long flags, | ||
53 | unsigned long fd, unsigned long offset); | ||
54 | |||
55 | #ifndef __tilegx__ | ||
56 | /* mm/fault.c */ | ||
57 | int sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *); | ||
58 | #endif | ||
59 | |||
60 | #endif /* _ASM_TILE_SYSCALLS_H */ | ||
diff --git a/arch/tile/include/asm/system.h b/arch/tile/include/asm/system.h new file mode 100644 index 000000000000..d6ca7f816c87 --- /dev/null +++ b/arch/tile/include/asm/system.h | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_SYSTEM_H | ||
16 | #define _ASM_TILE_SYSTEM_H | ||
17 | |||
18 | #ifndef __ASSEMBLY__ | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <linux/irqflags.h> | ||
22 | |||
23 | /* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */ | ||
24 | #include <asm/ptrace.h> | ||
25 | |||
26 | #include <arch/chip.h> | ||
27 | #include <arch/sim_def.h> | ||
28 | #include <arch/spr_def.h> | ||
29 | |||
30 | /* | ||
31 | * read_barrier_depends - Flush all pending reads that subsequent reads | ||
32 | * depend on. | ||
33 | * | ||
34 | * No data-dependent reads from memory-like regions are ever reordered | ||
35 | * over this barrier. All reads preceding this primitive are guaranteed | ||
36 | * to access memory (but not necessarily other CPUs' caches) before any | ||
37 | * reads following this primitive that depend on the data returned by | ||
38 | * any of the preceding reads. This primitive is much lighter weight than | ||
39 | * rmb() on most CPUs, and is never heavier weight than is | ||
40 | * rmb(). | ||
41 | * | ||
42 | * These ordering constraints are respected by both the local CPU | ||
43 | * and the compiler. | ||
44 | * | ||
45 | * Ordering is not guaranteed by anything other than these primitives, | ||
46 | * not even by data dependencies. See the documentation for | ||
47 | * memory_barrier() for examples and URLs to more information. | ||
48 | * | ||
49 | * For example, the following code would force ordering (the initial | ||
50 | * value of "a" is zero, "b" is one, and "p" is "&a"): | ||
51 | * | ||
52 | * <programlisting> | ||
53 | * CPU 0 CPU 1 | ||
54 | * | ||
55 | * b = 2; | ||
56 | * memory_barrier(); | ||
57 | * p = &b; q = p; | ||
58 | * read_barrier_depends(); | ||
59 | * d = *q; | ||
60 | * </programlisting> | ||
61 | * | ||
62 | * because the read of "*q" depends on the read of "p" and these | ||
63 | * two reads are separated by a read_barrier_depends(). However, | ||
64 | * the following code, with the same initial values for "a" and "b": | ||
65 | * | ||
66 | * <programlisting> | ||
67 | * CPU 0 CPU 1 | ||
68 | * | ||
69 | * a = 2; | ||
70 | * memory_barrier(); | ||
71 | * b = 3; y = b; | ||
72 | * read_barrier_depends(); | ||
73 | * x = a; | ||
74 | * </programlisting> | ||
75 | * | ||
76 | * does not enforce ordering, since there is no data dependency between | ||
77 | * the read of "a" and the read of "b". Therefore, on some CPUs, such | ||
78 | * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() | ||
79 | * in cases like this where there are no data dependencies. | ||
80 | */ | ||
81 | |||
82 | #define read_barrier_depends() do { } while (0) | ||
83 | |||
84 | #define __sync() __insn_mf() | ||
85 | |||
86 | #if CHIP_HAS_SPLIT_CYCLE() | ||
87 | #define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW) | ||
88 | #else | ||
89 | #define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */ | ||
90 | #endif | ||
91 | |||
92 | /* Fence to guarantee visibility of stores to incoherent memory. */ | ||
93 | static inline void | ||
94 | mb_incoherent(void) | ||
95 | { | ||
96 | __insn_mf(); | ||
97 | |||
98 | #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() | ||
99 | { | ||
100 | int __mb_incoherent(void); | ||
101 | #if CHIP_HAS_TILE_WRITE_PENDING() | ||
102 | const unsigned long WRITE_TIMEOUT_CYCLES = 400; | ||
103 | unsigned long start = get_cycles_low(); | ||
104 | do { | ||
105 | if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0) | ||
106 | return; | ||
107 | } while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES); | ||
108 | #endif /* CHIP_HAS_TILE_WRITE_PENDING() */ | ||
109 | (void) __mb_incoherent(); | ||
110 | } | ||
111 | #endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */ | ||
112 | } | ||
113 | |||
114 | #define fast_wmb() __sync() | ||
115 | #define fast_rmb() __sync() | ||
116 | #define fast_mb() __sync() | ||
117 | #define fast_iob() mb_incoherent() | ||
118 | |||
119 | #define wmb() fast_wmb() | ||
120 | #define rmb() fast_rmb() | ||
121 | #define mb() fast_mb() | ||
122 | #define iob() fast_iob() | ||
123 | |||
124 | #ifdef CONFIG_SMP | ||
125 | #define smp_mb() mb() | ||
126 | #define smp_rmb() rmb() | ||
127 | #define smp_wmb() wmb() | ||
128 | #define smp_read_barrier_depends() read_barrier_depends() | ||
129 | #else | ||
130 | #define smp_mb() barrier() | ||
131 | #define smp_rmb() barrier() | ||
132 | #define smp_wmb() barrier() | ||
133 | #define smp_read_barrier_depends() do { } while (0) | ||
134 | #endif | ||
135 | |||
136 | #define set_mb(var, value) \ | ||
137 | do { var = value; mb(); } while (0) | ||
138 | |||
139 | #include <linux/irqflags.h> | ||
140 | |||
141 | /* | ||
142 | * Pause the DMA engine and static network before task switching. | ||
143 | */ | ||
144 | #define prepare_arch_switch(next) _prepare_arch_switch(next) | ||
145 | void _prepare_arch_switch(struct task_struct *next); | ||
146 | |||
147 | |||
148 | /* | ||
149 | * switch_to(n) should switch tasks to task nr n, first | ||
150 | * checking that n isn't the current task, in which case it does nothing. | ||
151 | * The number of callee-saved registers saved on the kernel stack | ||
152 | * is defined here for use in copy_thread() and must agree with __switch_to(). | ||
153 | */ | ||
154 | #endif /* !__ASSEMBLY__ */ | ||
155 | #define CALLEE_SAVED_FIRST_REG 30 | ||
156 | #define CALLEE_SAVED_REGS_COUNT 24 /* r30 to r52, plus an empty to align */ | ||
157 | #ifndef __ASSEMBLY__ | ||
158 | struct task_struct; | ||
159 | #define switch_to(prev, next, last) ((last) = _switch_to((prev), (next))) | ||
160 | extern struct task_struct *_switch_to(struct task_struct *prev, | ||
161 | struct task_struct *next); | ||
162 | |||
163 | /* | ||
164 | * On SMP systems, when the scheduler does migration-cost autodetection, | ||
165 | * it needs a way to flush as much of the CPU's caches as possible: | ||
166 | * | ||
167 | * TODO: fill this in! | ||
168 | */ | ||
169 | static inline void sched_cacheflush(void) | ||
170 | { | ||
171 | } | ||
172 | |||
173 | #define arch_align_stack(x) (x) | ||
174 | |||
175 | /* | ||
176 | * Is the kernel doing fixups of unaligned accesses? If <0, no kernel | ||
177 | * intervention occurs and SIGBUS is delivered with no data address | ||
178 | * info. If 0, the kernel single-steps the instruction to discover | ||
179 | * the data address to provide with the SIGBUS. If 1, the kernel does | ||
180 | * a fixup. | ||
181 | */ | ||
182 | extern int unaligned_fixup; | ||
183 | |||
184 | /* Is the kernel printing on each unaligned fixup? */ | ||
185 | extern int unaligned_printk; | ||
186 | |||
187 | /* Number of unaligned fixups performed */ | ||
188 | extern unsigned int unaligned_fixup_count; | ||
189 | |||
190 | /* User-level DMA management functions */ | ||
191 | void grant_dma_mpls(void); | ||
192 | void restrict_dma_mpls(void); | ||
193 | |||
194 | |||
195 | /* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */ | ||
196 | extern int _sim_syscall(int syscall_num, ...); | ||
197 | #define sim_syscall(syscall_num, ...) \ | ||
198 | _sim_syscall(SIM_CONTROL_SYSCALL + \ | ||
199 | ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \ | ||
200 | ## __VA_ARGS__) | ||
201 | |||
202 | /* | ||
203 | * Kernel threads can check to see if they need to migrate their | ||
204 | * stack whenever they return from a context switch; for user | ||
205 | * threads, we defer until they are returning to user-space. | ||
206 | */ | ||
207 | #define finish_arch_switch(prev) do { \ | ||
208 | if (unlikely((prev)->state == TASK_DEAD)) \ | ||
209 | __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \ | ||
210 | ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \ | ||
211 | __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \ | ||
212 | (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \ | ||
213 | if (current->mm == NULL && !kstack_hash && \ | ||
214 | current_thread_info()->homecache_cpu != smp_processor_id()) \ | ||
215 | homecache_migrate_kthread(); \ | ||
216 | } while (0) | ||
217 | |||
218 | #endif /* !__ASSEMBLY__ */ | ||
219 | |||
220 | #endif /* _ASM_TILE_SYSTEM_H */ | ||
diff --git a/arch/tile/include/asm/termbits.h b/arch/tile/include/asm/termbits.h new file mode 100644 index 000000000000..3935b106de79 --- /dev/null +++ b/arch/tile/include/asm/termbits.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/termbits.h> | |||
diff --git a/arch/tile/include/asm/termios.h b/arch/tile/include/asm/termios.h new file mode 100644 index 000000000000..280d78a9d966 --- /dev/null +++ b/arch/tile/include/asm/termios.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/termios.h> | |||
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h new file mode 100644 index 000000000000..9024bf3530aa --- /dev/null +++ b/arch/tile/include/asm/thread_info.h | |||
@@ -0,0 +1,165 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2002 David Howells (dhowells@redhat.com) | ||
3 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation, version 2. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, but | ||
10 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
12 | * NON INFRINGEMENT. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_THREAD_INFO_H | ||
17 | #define _ASM_TILE_THREAD_INFO_H | ||
18 | |||
19 | #include <asm/processor.h> | ||
20 | #include <asm/page.h> | ||
21 | #ifndef __ASSEMBLY__ | ||
22 | |||
23 | /* | ||
24 | * Low level task data that assembly code needs immediate access to. | ||
25 | * The structure is placed at the bottom of the supervisor stack. | ||
26 | */ | ||
27 | struct thread_info { | ||
28 | struct task_struct *task; /* main task structure */ | ||
29 | struct exec_domain *exec_domain; /* execution domain */ | ||
30 | unsigned long flags; /* low level flags */ | ||
31 | unsigned long status; /* thread-synchronous flags */ | ||
32 | __u32 homecache_cpu; /* CPU we are homecached on */ | ||
33 | __u32 cpu; /* current CPU */ | ||
34 | int preempt_count; /* 0 => preemptable, | ||
35 | <0 => BUG */ | ||
36 | |||
37 | mm_segment_t addr_limit; /* thread address space | ||
38 | (KERNEL_DS or USER_DS) */ | ||
39 | struct restart_block restart_block; | ||
40 | struct single_step_state *step_state; /* single step state | ||
41 | (if non-zero) */ | ||
42 | }; | ||
43 | |||
44 | /* | ||
45 | * macros/functions for gaining access to the thread information structure. | ||
46 | */ | ||
47 | #define INIT_THREAD_INFO(tsk) \ | ||
48 | { \ | ||
49 | .task = &tsk, \ | ||
50 | .exec_domain = &default_exec_domain, \ | ||
51 | .flags = 0, \ | ||
52 | .cpu = 0, \ | ||
53 | .preempt_count = INIT_PREEMPT_COUNT, \ | ||
54 | .addr_limit = KERNEL_DS, \ | ||
55 | .restart_block = { \ | ||
56 | .fn = do_no_restart_syscall, \ | ||
57 | }, \ | ||
58 | .step_state = 0, \ | ||
59 | } | ||
60 | |||
61 | #define init_thread_info (init_thread_union.thread_info) | ||
62 | #define init_stack (init_thread_union.stack) | ||
63 | |||
64 | #endif /* !__ASSEMBLY__ */ | ||
65 | |||
66 | #if PAGE_SIZE < 8192 | ||
67 | #define THREAD_SIZE_ORDER (13 - PAGE_SHIFT) | ||
68 | #else | ||
69 | #define THREAD_SIZE_ORDER (0) | ||
70 | #endif | ||
71 | |||
72 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) | ||
73 | #define LOG2_THREAD_SIZE (PAGE_SHIFT + THREAD_SIZE_ORDER) | ||
74 | |||
75 | #define STACK_WARN (THREAD_SIZE/8) | ||
76 | |||
77 | #ifndef __ASSEMBLY__ | ||
78 | |||
79 | /* How to get the thread information struct from C. */ | ||
80 | register unsigned long stack_pointer __asm__("sp"); | ||
81 | |||
82 | #define current_thread_info() \ | ||
83 | ((struct thread_info *)(stack_pointer & -THREAD_SIZE)) | ||
84 | |||
85 | #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR | ||
86 | extern struct thread_info *alloc_thread_info(struct task_struct *task); | ||
87 | extern void free_thread_info(struct thread_info *info); | ||
88 | |||
89 | /* Switch boot idle thread to a freshly-allocated stack and free old stack. */ | ||
90 | extern void cpu_idle_on_new_stack(struct thread_info *old_ti, | ||
91 | unsigned long new_sp, | ||
92 | unsigned long new_ss10); | ||
93 | |||
94 | #else /* __ASSEMBLY__ */ | ||
95 | |||
96 | /* how to get the thread information struct from ASM */ | ||
97 | #ifdef __tilegx__ | ||
98 | #define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63 | ||
99 | #else | ||
100 | #define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31 | ||
101 | #endif | ||
102 | |||
103 | #endif /* !__ASSEMBLY__ */ | ||
104 | |||
105 | #define PREEMPT_ACTIVE 0x10000000 | ||
106 | |||
107 | /* | ||
108 | * Thread information flags that various assembly files may need to access. | ||
109 | * Keep flags accessed frequently in low bits, particularly since it makes | ||
110 | * it easier to build constants in assembly. | ||
111 | */ | ||
112 | #define TIF_SIGPENDING 0 /* signal pending */ | ||
113 | #define TIF_NEED_RESCHED 1 /* rescheduling necessary */ | ||
114 | #define TIF_SINGLESTEP 2 /* restore singlestep on return to | ||
115 | user mode */ | ||
116 | #define TIF_ASYNC_TLB 3 /* got an async TLB fault in kernel */ | ||
117 | #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ | ||
118 | #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ | ||
119 | #define TIF_SECCOMP 6 /* secure computing */ | ||
120 | #define TIF_MEMDIE 7 /* OOM killer at work */ | ||
121 | |||
122 | #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) | ||
123 | #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) | ||
124 | #define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) | ||
125 | #define _TIF_ASYNC_TLB (1<<TIF_ASYNC_TLB) | ||
126 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | ||
127 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | ||
128 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | ||
129 | #define _TIF_MEMDIE (1<<TIF_MEMDIE) | ||
130 | |||
131 | /* Work to do on any return to user space. */ | ||
132 | #define _TIF_ALLWORK_MASK \ | ||
133 | (_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SINGLESTEP|_TIF_ASYNC_TLB) | ||
134 | |||
135 | /* | ||
136 | * Thread-synchronous status. | ||
137 | * | ||
138 | * This is different from the flags in that nobody else | ||
139 | * ever touches our thread-synchronous status, so we don't | ||
140 | * have to worry about atomic accesses. | ||
141 | */ | ||
142 | #ifdef __tilegx__ | ||
143 | #define TS_COMPAT 0x0001 /* 32-bit compatibility mode */ | ||
144 | #endif | ||
145 | #define TS_POLLING 0x0004 /* in idle loop but not sleeping */ | ||
146 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */ | ||
147 | #define TS_EXEC_HASH_SET 0x0010 /* apply TS_EXEC_HASH_xxx flags */ | ||
148 | #define TS_EXEC_HASH_RO 0x0020 /* during exec, hash r/o segments */ | ||
149 | #define TS_EXEC_HASH_RW 0x0040 /* during exec, hash r/w segments */ | ||
150 | #define TS_EXEC_HASH_STACK 0x0080 /* during exec, hash the stack */ | ||
151 | #define TS_EXEC_HASH_FLAGS 0x00f0 /* mask for TS_EXEC_HASH_xxx flags */ | ||
152 | |||
153 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | ||
154 | |||
155 | #ifndef __ASSEMBLY__ | ||
156 | #define HAVE_SET_RESTORE_SIGMASK 1 | ||
157 | static inline void set_restore_sigmask(void) | ||
158 | { | ||
159 | struct thread_info *ti = current_thread_info(); | ||
160 | ti->status |= TS_RESTORE_SIGMASK; | ||
161 | set_bit(TIF_SIGPENDING, &ti->flags); | ||
162 | } | ||
163 | #endif /* !__ASSEMBLY__ */ | ||
164 | |||
165 | #endif /* _ASM_TILE_THREAD_INFO_H */ | ||
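
current_thread_info() above works because kernel stacks are THREAD_SIZE bytes long and THREAD_SIZE-aligned, so masking the stack pointer with -THREAD_SIZE rounds it down to the stack base, where struct thread_info sits. A standalone illustration follows; the stack pointer value is made up.

/* Standalone illustration of the sp & -THREAD_SIZE trick; the stack
 * pointer value here is made up. */
#include <stdio.h>

#define THREAD_SIZE 8192UL           /* 8 KB stacks, as with 4 KB pages above */

int main(void)
{
    unsigned long sp = 0xc0007e40UL;             /* pretend stack pointer */
    unsigned long ti = sp & -THREAD_SIZE;        /* base of the aligned stack */

    printf("sp = 0x%lx -> thread_info at 0x%lx\n", sp, ti);
    return 0;
}
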
diff --git a/arch/tile/include/asm/timex.h b/arch/tile/include/asm/timex.h new file mode 100644 index 000000000000..3baf5fc4c0a1 --- /dev/null +++ b/arch/tile/include/asm/timex.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TIMEX_H | ||
16 | #define _ASM_TILE_TIMEX_H | ||
17 | |||
18 | /* | ||
19 | * This rate should be a multiple of the possible HZ values (100, 250, 1000) | ||
20 | * and a fraction of the possible hardware timer frequencies. Our timer | ||
21 | * frequency is highly tunable but also quite precise, so for the primary use | ||
22 | * of this value (setting ACT_HZ from HZ) we just pick a value that causes | ||
23 | * ACT_HZ to be set to HZ. We make the value somewhat large just to be | ||
24 | * more robust in case someone tries out a new value of HZ. | ||
25 | */ | ||
26 | #define CLOCK_TICK_RATE 1000000 | ||
27 | |||
28 | typedef unsigned long long cycles_t; | ||
29 | |||
30 | #if CHIP_HAS_SPLIT_CYCLE() | ||
31 | cycles_t get_cycles(void); | ||
32 | #else | ||
33 | static inline cycles_t get_cycles(void) | ||
34 | { | ||
35 | return __insn_mfspr(SPR_CYCLE); | ||
36 | } | ||
37 | #endif | ||
38 | |||
39 | cycles_t get_clock_rate(void); | ||
40 | |||
41 | /* Called at cpu initialization to set some low-level constants. */ | ||
42 | void setup_clock(void); | ||
43 | |||
44 | /* Called at cpu initialization to start the tile-timer clock device. */ | ||
45 | void setup_tile_timer(void); | ||
46 | |||
47 | #endif /* _ASM_TILE_TIMEX_H */ | ||
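
get_cycles() and get_clock_rate() above are the raw ingredients for simple timing: sample the cycle counter twice and scale the delta by the clock rate. A standalone sketch with made-up numbers (the 700 MHz rate and the cycle samples are not from this header):

/* Standalone sketch of converting a cycle delta to nanoseconds; the
 * clock rate and cycle samples here are made up. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t rate_hz = 700000000ULL;     /* hypothetical 700 MHz tile clock */
    uint64_t t0 = 1000, t1 = 8000;       /* pretend get_cycles() samples */
    uint64_t ns = (t1 - t0) * 1000000000ULL / rate_hz;

    printf("elapsed: %" PRIu64 " ns\n", ns);
    return 0;
}
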
diff --git a/arch/tile/include/asm/tlb.h b/arch/tile/include/asm/tlb.h new file mode 100644 index 000000000000..4a891a1a8df3 --- /dev/null +++ b/arch/tile/include/asm/tlb.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TLB_H | ||
16 | #define _ASM_TILE_TLB_H | ||
17 | |||
18 | #define tlb_start_vma(tlb, vma) do { } while (0) | ||
19 | #define tlb_end_vma(tlb, vma) do { } while (0) | ||
20 | #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0) | ||
21 | #define tlb_flush(tlb) flush_tlb_mm((tlb)->mm) | ||
22 | |||
23 | #include <asm-generic/tlb.h> | ||
24 | |||
25 | #endif /* _ASM_TILE_TLB_H */ | ||
diff --git a/arch/tile/include/asm/tlbflush.h b/arch/tile/include/asm/tlbflush.h new file mode 100644 index 000000000000..96199d214fb8 --- /dev/null +++ b/arch/tile/include/asm/tlbflush.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TLBFLUSH_H | ||
16 | #define _ASM_TILE_TLBFLUSH_H | ||
17 | |||
18 | #include <linux/mm.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <asm/cacheflush.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <hv/hypervisor.h> | ||
24 | |||
25 | /* | ||
26 | * Rather than associating each mm with its own ASID, we just use | ||
27 | * ASIDs to allow us to lazily flush the TLB when we switch mms. | ||
28 | * This way we only have to do an actual TLB flush on mm switch | ||
29 | * every time we wrap ASIDs, not every single time we switch. | ||
30 | * | ||
31 | * FIXME: We might improve performance by keeping ASIDs around | ||
32 | * properly, though since the hypervisor direct-maps VAs to TSB | ||
33 | * entries, we're likely to have lost at least the executable page | ||
34 | * mappings by the time we switch back to the original mm. | ||
35 | */ | ||
36 | DECLARE_PER_CPU(int, current_asid); | ||
37 | |||
38 | /* The hypervisor tells us what ASIDs are available to us. */ | ||
39 | extern int min_asid, max_asid; | ||
40 | |||
41 | static inline unsigned long hv_page_size(const struct vm_area_struct *vma) | ||
42 | { | ||
43 | return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE; | ||
44 | } | ||
45 | |||
46 | /* Pass as vma pointer for non-executable mapping, if no vma available. */ | ||
47 | #define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL) | ||
48 | |||
49 | /* Flush a single user page on this cpu. */ | ||
50 | static inline void local_flush_tlb_page(const struct vm_area_struct *vma, | ||
51 | unsigned long addr, | ||
52 | unsigned long page_size) | ||
53 | { | ||
54 | int rc = hv_flush_page(addr, page_size); | ||
55 | if (rc < 0) | ||
56 | panic("hv_flush_page(%#lx,%#lx) failed: %d", | ||
57 | addr, page_size, rc); | ||
58 | if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) | ||
59 | __flush_icache(); | ||
60 | } | ||
61 | |||
62 | /* Flush range of user pages on this cpu. */ | ||
63 | static inline void local_flush_tlb_pages(const struct vm_area_struct *vma, | ||
64 | unsigned long addr, | ||
65 | unsigned long page_size, | ||
66 | unsigned long len) | ||
67 | { | ||
68 | int rc = hv_flush_pages(addr, page_size, len); | ||
69 | if (rc < 0) | ||
70 | panic("hv_flush_pages(%#lx,%#lx,%#lx) failed: %d", | ||
71 | addr, page_size, len, rc); | ||
72 | if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC))) | ||
73 | __flush_icache(); | ||
74 | } | ||
75 | |||
76 | /* Flush all user pages on this cpu. */ | ||
77 | static inline void local_flush_tlb(void) | ||
78 | { | ||
79 | int rc = hv_flush_all(1); /* preserve global mappings */ | ||
80 | if (rc < 0) | ||
81 | panic("hv_flush_all(1) failed: %d", rc); | ||
82 | __flush_icache(); | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * Global pages have to be flushed a bit differently. Not a real | ||
87 | * performance problem because this does not happen often. | ||
88 | */ | ||
89 | static inline void local_flush_tlb_all(void) | ||
90 | { | ||
91 | int i; | ||
92 | for (i = 0; ; ++i) { | ||
93 | HV_VirtAddrRange r = hv_inquire_virtual(i); | ||
94 | if (r.size == 0) | ||
95 | break; | ||
96 | local_flush_tlb_pages(NULL, r.start, PAGE_SIZE, r.size); | ||
97 | local_flush_tlb_pages(NULL, r.start, HPAGE_SIZE, r.size); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * TLB flushing: | ||
103 | * | ||
104 | * - flush_tlb() flushes the current mm struct TLBs | ||
105 | * - flush_tlb_all() flushes all processes TLBs | ||
106 | * - flush_tlb_mm(mm) flushes the specified mm context TLB's | ||
107 | * - flush_tlb_page(vma, vmaddr) flushes one page | ||
108 | * - flush_tlb_range(vma, start, end) flushes a range of pages | ||
109 | * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages | ||
110 | * - flush_tlb_others(cpumask, mm, va) flushes TLBs on other cpus | ||
111 | * | ||
112 | * Here (as in vm_area_struct), "end" means the first byte after | ||
113 | * our end address. | ||
114 | */ | ||
115 | |||
116 | extern void flush_tlb_all(void); | ||
117 | extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); | ||
118 | extern void flush_tlb_current_task(void); | ||
119 | extern void flush_tlb_mm(struct mm_struct *); | ||
120 | extern void flush_tlb_page(const struct vm_area_struct *, unsigned long); | ||
121 | extern void flush_tlb_page_mm(const struct vm_area_struct *, | ||
122 | struct mm_struct *, unsigned long); | ||
123 | extern void flush_tlb_range(const struct vm_area_struct *, | ||
124 | unsigned long start, unsigned long end); | ||
125 | |||
126 | #define flush_tlb() flush_tlb_current_task() | ||
127 | |||
128 | #endif /* _ASM_TILE_TLBFLUSH_H */ | ||
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h new file mode 100644 index 000000000000..343172d422a9 --- /dev/null +++ b/arch/tile/include/asm/topology.h | |||
@@ -0,0 +1,85 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TOPOLOGY_H | ||
16 | #define _ASM_TILE_TOPOLOGY_H | ||
17 | |||
18 | #ifdef CONFIG_NUMA | ||
19 | |||
20 | #include <linux/cpumask.h> | ||
21 | |||
22 | /* Mappings between logical cpu number and node number. */ | ||
23 | extern struct cpumask node_2_cpu_mask[]; | ||
24 | extern char cpu_2_node[]; | ||
25 | |||
26 | /* Returns the number of the node containing CPU 'cpu'. */ | ||
27 | static inline int cpu_to_node(int cpu) | ||
28 | { | ||
29 | return cpu_2_node[cpu]; | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * Returns the number of the node containing Node 'node'. | ||
34 | * This architecture is flat, so it is a pretty simple function! | ||
35 | */ | ||
36 | #define parent_node(node) (node) | ||
37 | |||
38 | /* Returns a bitmask of CPUs on Node 'node'. */ | ||
39 | static inline const struct cpumask *cpumask_of_node(int node) | ||
40 | { | ||
41 | return &node_2_cpu_mask[node]; | ||
42 | } | ||
43 | |||
44 | /* For now, use numa node -1 for global allocation. */ | ||
45 | #define pcibus_to_node(bus) ((void)(bus), -1) | ||
46 | |||
47 | /* sched_domains SD_NODE_INIT for TILE architecture */ | ||
48 | #define SD_NODE_INIT (struct sched_domain) { \ | ||
49 | .min_interval = 8, \ | ||
50 | .max_interval = 32, \ | ||
51 | .busy_factor = 32, \ | ||
52 | .imbalance_pct = 125, \ | ||
53 | .cache_nice_tries = 1, \ | ||
54 | .busy_idx = 3, \ | ||
55 | .idle_idx = 1, \ | ||
56 | .newidle_idx = 2, \ | ||
57 | .wake_idx = 1, \ | ||
58 | .flags = SD_LOAD_BALANCE \ | ||
59 | | SD_BALANCE_NEWIDLE \ | ||
60 | | SD_BALANCE_EXEC \ | ||
61 | | SD_BALANCE_FORK \ | ||
62 | | SD_WAKE_AFFINE \ | ||
63 | | SD_SERIALIZE, \ | ||
64 | .last_balance = jiffies, \ | ||
65 | .balance_interval = 1, \ | ||
66 | } | ||
67 | |||
68 | /* By definition, we create nodes based on online memory. */ | ||
69 | #define node_has_online_mem(nid) 1 | ||
70 | |||
71 | #endif /* CONFIG_NUMA */ | ||
72 | |||
73 | #include <asm-generic/topology.h> | ||
74 | |||
75 | #ifdef CONFIG_SMP | ||
76 | #define topology_physical_package_id(cpu) ((void)(cpu), 0) | ||
77 | #define topology_core_id(cpu) (cpu) | ||
78 | #define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask) | ||
79 | #define topology_thread_cpumask(cpu) cpumask_of(cpu) | ||
80 | |||
81 | /* indicates that pointers to the topology struct cpumask maps are valid */ | ||
82 | #define arch_provides_topology_pointers yes | ||
83 | #endif | ||
84 | |||
85 | #endif /* _ASM_TILE_TOPOLOGY_H */ | ||
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h new file mode 100644 index 000000000000..eab33d4a917d --- /dev/null +++ b/arch/tile/include/asm/traps.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_TRAPS_H | ||
16 | #define _ASM_TILE_TRAPS_H | ||
17 | |||
18 | /* mm/fault.c */ | ||
19 | void do_page_fault(struct pt_regs *, int fault_num, | ||
20 | unsigned long address, unsigned long write); | ||
21 | |||
22 | /* kernel/traps.c */ | ||
23 | void do_trap(struct pt_regs *, int fault_num, unsigned long reason); | ||
24 | |||
25 | /* kernel/time.c */ | ||
26 | void do_timer_interrupt(struct pt_regs *, int fault_num); | ||
27 | |||
28 | /* kernel/messaging.c */ | ||
29 | void hv_message_intr(struct pt_regs *, int intnum); | ||
30 | |||
31 | /* kernel/irq.c */ | ||
32 | void tile_dev_intr(struct pt_regs *, int intnum); | ||
33 | |||
34 | |||
35 | |||
36 | #endif /* _ASM_TILE_TRAPS_H */ | ||
diff --git a/arch/tile/include/asm/types.h b/arch/tile/include/asm/types.h new file mode 100644 index 000000000000..b9e79bc580dd --- /dev/null +++ b/arch/tile/include/asm/types.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/types.h> | |||
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h new file mode 100644 index 000000000000..f3058afd5a88 --- /dev/null +++ b/arch/tile/include/asm/uaccess.h | |||
@@ -0,0 +1,578 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_UACCESS_H | ||
16 | #define _ASM_TILE_UACCESS_H | ||
17 | |||
18 | /* | ||
19 | * User space memory access functions | ||
20 | */ | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <asm-generic/uaccess-unaligned.h> | ||
24 | #include <asm/processor.h> | ||
25 | #include <asm/page.h> | ||
26 | |||
27 | #define VERIFY_READ 0 | ||
28 | #define VERIFY_WRITE 1 | ||
29 | |||
30 | /* | ||
31 | * The fs value determines whether argument validity checking should be | ||
32 | * performed or not. If get_fs() == USER_DS, checking is performed, with | ||
33 | * get_fs() == KERNEL_DS, checking is bypassed. | ||
34 | * | ||
35 | * For historical reasons, these macros are grossly misnamed. | ||
36 | */ | ||
37 | #define MAKE_MM_SEG(a) ((mm_segment_t) { (a) }) | ||
38 | |||
39 | #define KERNEL_DS MAKE_MM_SEG(-1UL) | ||
40 | #define USER_DS MAKE_MM_SEG(PAGE_OFFSET) | ||
41 | |||
42 | #define get_ds() (KERNEL_DS) | ||
43 | #define get_fs() (current_thread_info()->addr_limit) | ||
44 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | ||
45 | |||
46 | #define segment_eq(a, b) ((a).seg == (b).seg) | ||
47 | |||
48 | #ifndef __tilegx__ | ||
49 | /* | ||
50 | * We could allow mapping all 16 MB at 0xfc000000, but we set up a | ||
51 | * special hack in arch_setup_additional_pages() to auto-create a mapping | ||
52 | * for the first 16 KB, and it would seem strange to have different | ||
53 | * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000. | ||
54 | */ | ||
55 | static inline int is_arch_mappable_range(unsigned long addr, | ||
56 | unsigned long size) | ||
57 | { | ||
58 | return (addr >= MEM_USER_INTRPT && | ||
59 | addr < (MEM_USER_INTRPT + INTRPT_SIZE) && | ||
60 | size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr); | ||
61 | } | ||
62 | #define is_arch_mappable_range is_arch_mappable_range | ||
63 | #else | ||
64 | #define is_arch_mappable_range(addr, size) 0 | ||
65 | #endif | ||
66 | |||
67 | /* | ||
68 | * Test whether a block of memory is a valid user space address. | ||
69 | * Returns 0 if the range is valid, nonzero otherwise. | ||
70 | */ | ||
71 | int __range_ok(unsigned long addr, unsigned long size); | ||
72 | |||
73 | /** | ||
74 | * access_ok: - Checks if a user space pointer is valid | ||
75 | * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that | ||
76 | * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe | ||
77 | * to write to a block, it is always safe to read from it. | ||
78 | * @addr: User space pointer to start of block to check | ||
79 | * @size: Size of block to check | ||
80 | * | ||
81 | * Context: User context only. This function may sleep. | ||
82 | * | ||
83 | * Checks if a pointer to a block of memory in user space is valid. | ||
84 | * | ||
85 | * Returns true (nonzero) if the memory block may be valid, false (zero) | ||
86 | * if it is definitely invalid. | ||
87 | * | ||
88 | * Note that, depending on architecture, this function probably just | ||
89 | * checks that the pointer is in the user space range - after calling | ||
90 | * this function, memory access functions may still return -EFAULT. | ||
91 | */ | ||
92 | #define access_ok(type, addr, size) \ | ||
93 | (likely(__range_ok((unsigned long)addr, size) == 0)) | ||
94 | |||
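
A hedged usage sketch of these semantics (the helper function and its name are hypothetical, not part of this header): access_ok() only validates the address range, so the subsequent copy can still fail and must be checked separately.

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/* Hypothetical helper: returns 0 on success, -EFAULT otherwise. */
	static int read_user_word(const int __user *uptr, int *out)
	{
		if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
			return -EFAULT;	/* definitely not a valid user range */
		if (__copy_from_user(out, uptr, sizeof(*out)))
			return -EFAULT;	/* range looked plausible but still faulted */
		return 0;
	}
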
95 | /* | ||
96 | * The exception table consists of pairs of addresses: the first is the | ||
97 | * address of an instruction that is allowed to fault, and the second is | ||
98 | * the address at which the program should continue. No registers are | ||
99 | * modified, so it is entirely up to the continuation code to figure out | ||
100 | * what to do. | ||
101 | * | ||
102 | * All the routines below use bits of fixup code that are out of line | ||
103 | * with the main instruction path. This means when everything is well, | ||
104 | * we don't even have to jump over them. Further, they do not intrude | ||
105 | * on our cache or tlb entries. | ||
106 | */ | ||
107 | |||
108 | struct exception_table_entry { | ||
109 | unsigned long insn, fixup; | ||
110 | }; | ||
111 | |||
112 | extern int fixup_exception(struct pt_regs *regs); | ||
113 | |||
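
fixup_exception() itself lives in the architecture's mm code; conceptually it searches this table for the faulting PC and redirects execution to the matching fixup address. A minimal sketch, assuming the usual __start___ex_table/__stop___ex_table linker symbols and a simple linear scan (the real implementation may differ):

	/* Sketch only: look up a fixup for a faulting program counter. */
	extern struct exception_table_entry __start___ex_table[];
	extern struct exception_table_entry __stop___ex_table[];

	static const struct exception_table_entry *
	search_ex_table(unsigned long pc)
	{
		const struct exception_table_entry *e;

		for (e = __start___ex_table; e < __stop___ex_table; e++)
			if (e->insn == pc)
				return e;	/* caller sets regs->pc = e->fixup */
		return NULL;
	}
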
114 | /* | ||
115 | * We return the __get_user_N function results in a structure, | ||
116 | * thus in r0 and r1. If "err" is zero, "val" is the result | ||
117 | * of the read; otherwise, "err" is -EFAULT. | ||
118 | * | ||
119 | * We rarely need 8-byte values on a 32-bit architecture, but | ||
120 | * we size the structure to accommodate. In practice, for the | ||
121 | * smaller reads, we can zero the high word for free, and | ||
122 | * the caller will ignore it by virtue of casting anyway. | ||
123 | */ | ||
124 | struct __get_user { | ||
125 | unsigned long long val; | ||
126 | int err; | ||
127 | }; | ||
128 | |||
129 | /* | ||
130 | * FIXME: we should express these as inline extended assembler, since | ||
131 | * they're fundamentally just a variable dereference and some | ||
132 | * supporting exception_table gunk. Note that (a la i386) we can | ||
133 | * extend the copy_to_user and copy_from_user routines to call into | ||
134 | * such extended assembler routines, though we will have to use a | ||
135 | * different return code in that case (1, 2, or 4, rather than -EFAULT). | ||
136 | */ | ||
137 | extern struct __get_user __get_user_1(const void *); | ||
138 | extern struct __get_user __get_user_2(const void *); | ||
139 | extern struct __get_user __get_user_4(const void *); | ||
140 | extern struct __get_user __get_user_8(const void *); | ||
141 | extern int __put_user_1(long, void *); | ||
142 | extern int __put_user_2(long, void *); | ||
143 | extern int __put_user_4(long, void *); | ||
144 | extern int __put_user_8(long long, void *); | ||
145 | |||
146 | /* Unimplemented routines to cause linker failures */ | ||
147 | extern struct __get_user __get_user_bad(void); | ||
148 | extern int __put_user_bad(void); | ||
149 | |||
150 | /* | ||
151 | * Careful: we have to cast the result to the type of the pointer | ||
152 | * for sign reasons. | ||
153 | */ | ||
154 | /** | ||
155 | * __get_user: - Get a simple variable from user space, with less checking. | ||
156 | * @x: Variable to store result. | ||
157 | * @ptr: Source address, in user space. | ||
158 | * | ||
159 | * Context: User context only. This function may sleep. | ||
160 | * | ||
161 | * This macro copies a single simple variable from user space to kernel | ||
162 | * space. It supports simple types like char and int, but not larger | ||
163 | * data types like structures or arrays. | ||
164 | * | ||
165 | * @ptr must have pointer-to-simple-variable type, and the result of | ||
166 | * dereferencing @ptr must be assignable to @x without a cast. | ||
167 | * | ||
168 | * Returns zero on success, or -EFAULT on error. | ||
169 | * On error, the variable @x is set to zero. | ||
170 | * | ||
171 | * Caller must check the pointer with access_ok() before calling this | ||
172 | * function. | ||
173 | */ | ||
174 | #define __get_user(x, ptr) \ | ||
175 | ({ struct __get_user __ret; \ | ||
176 | __typeof__(*(ptr)) const __user *__gu_addr = (ptr); \ | ||
177 | __chk_user_ptr(__gu_addr); \ | ||
178 | switch (sizeof(*(__gu_addr))) { \ | ||
179 | case 1: \ | ||
180 | __ret = __get_user_1(__gu_addr); \ | ||
181 | break; \ | ||
182 | case 2: \ | ||
183 | __ret = __get_user_2(__gu_addr); \ | ||
184 | break; \ | ||
185 | case 4: \ | ||
186 | __ret = __get_user_4(__gu_addr); \ | ||
187 | break; \ | ||
188 | case 8: \ | ||
189 | __ret = __get_user_8(__gu_addr); \ | ||
190 | break; \ | ||
191 | default: \ | ||
192 | __ret = __get_user_bad(); \ | ||
193 | break; \ | ||
194 | } \ | ||
195 | (x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \ | ||
196 | __ret.val; \ | ||
197 | __ret.err; \ | ||
198 | }) | ||
199 | |||
200 | /** | ||
201 | * __put_user: - Write a simple value into user space, with less checking. | ||
202 | * @x: Value to copy to user space. | ||
203 | * @ptr: Destination address, in user space. | ||
204 | * | ||
205 | * Context: User context only. This function may sleep. | ||
206 | * | ||
207 | * This macro copies a single simple value from kernel space to user | ||
208 | * space. It supports simple types like char and int, but not larger | ||
209 | * data types like structures or arrays. | ||
210 | * | ||
211 | * @ptr must have pointer-to-simple-variable type, and @x must be assignable | ||
212 | * to the result of dereferencing @ptr. | ||
213 | * | ||
214 | * Caller must check the pointer with access_ok() before calling this | ||
215 | * function. | ||
216 | * | ||
217 | * Returns zero on success, or -EFAULT on error. | ||
218 | * | ||
219 | * Implementation note: The "case 8" logic of casting to the type of | ||
220 | * the result of subtracting the value from itself is basically a way | ||
221 | * of keeping all integer types the same, but casting any pointers to | ||
222 | * ptrdiff_t, i.e. also an integer type. This way there are no | ||
223 | * questionable casts seen by the compiler on an ILP32 platform. | ||
224 | */ | ||
225 | #define __put_user(x, ptr) \ | ||
226 | ({ \ | ||
227 | int __pu_err = 0; \ | ||
228 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ | ||
229 | typeof(*__pu_addr) __pu_val = (x); \ | ||
230 | __chk_user_ptr(__pu_addr); \ | ||
231 | switch (sizeof(__pu_val)) { \ | ||
232 | case 1: \ | ||
233 | __pu_err = __put_user_1((long)__pu_val, __pu_addr); \ | ||
234 | break; \ | ||
235 | case 2: \ | ||
236 | __pu_err = __put_user_2((long)__pu_val, __pu_addr); \ | ||
237 | break; \ | ||
238 | case 4: \ | ||
239 | __pu_err = __put_user_4((long)__pu_val, __pu_addr); \ | ||
240 | break; \ | ||
241 | case 8: \ | ||
242 | __pu_err = \ | ||
243 | __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\ | ||
244 | __pu_addr); \ | ||
245 | break; \ | ||
246 | default: \ | ||
247 | __pu_err = __put_user_bad(); \ | ||
248 | break; \ | ||
249 | } \ | ||
250 | __pu_err; \ | ||
251 | }) | ||
252 | |||
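
A standalone illustration of the implementation note above (plain GNU C, not kernel code): for an integer value the subtraction type is just the promoted integer type, while for a pointer it is ptrdiff_t, so the cast yields an integer either way without a questionable pointer cast.

	#include <stddef.h>

	long long widen(char *p, int i)
	{
		/* __typeof__(p - p) is ptrdiff_t; __typeof__(i - i) is int. */
		long long a = (__typeof__(p - p))p;	/* pointer -> ptrdiff_t */
		long long b = (__typeof__(i - i))i;	/* int -> int */
		return a + b;
	}
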
253 | /* | ||
254 | * The versions of get_user and put_user without initial underscores | ||
255 | * check the address of their arguments to make sure they are not | ||
256 | * in kernel space. | ||
257 | */ | ||
258 | #define put_user(x, ptr) \ | ||
259 | ({ \ | ||
260 | __typeof__(*(ptr)) __user *__Pu_addr = (ptr); \ | ||
261 | access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ? \ | ||
262 | __put_user((x), (__Pu_addr)) : \ | ||
263 | -EFAULT; \ | ||
264 | }) | ||
265 | |||
266 | #define get_user(x, ptr) \ | ||
267 | ({ \ | ||
268 | __typeof__(*(ptr)) const __user *__Gu_addr = (ptr); \ | ||
269 | access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ? \ | ||
270 | __get_user((x), (__Gu_addr)) : \ | ||
271 | ((x) = 0, -EFAULT); \ | ||
272 | }) | ||
273 | |||
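
A hedged usage sketch of the checked variants (the handler and its behavior are hypothetical): get_user() and put_user() return 0 or -EFAULT, so callers normally just propagate the result.

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	/* Hypothetical handler that doubles a user-supplied value in place. */
	static long double_user_value(int __user *uptr)
	{
		int val;

		if (get_user(val, uptr))
			return -EFAULT;		/* bad pointer or fault on read */
		if (put_user(val * 2, uptr))
			return -EFAULT;		/* fault on write-back */
		return 0;
	}
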
274 | /** | ||
275 | * __copy_to_user() - copy data into user space, with less checking. | ||
276 | * @to: Destination address, in user space. | ||
277 | * @from: Source address, in kernel space. | ||
278 | * @n: Number of bytes to copy. | ||
279 | * | ||
280 | * Context: User context only. This function may sleep. | ||
281 | * | ||
282 | * Copy data from kernel space to user space. Caller must check | ||
283 | * the specified block with access_ok() before calling this function. | ||
284 | * | ||
285 | * Returns number of bytes that could not be copied. | ||
286 | * On success, this will be zero. | ||
287 | * | ||
288 | * An alternate version - __copy_to_user_inatomic() - is designed | ||
289 | * to be called from atomic context, typically bracketed by calls | ||
290 | * to pagefault_disable() and pagefault_enable(). | ||
291 | */ | ||
292 | extern unsigned long __must_check __copy_to_user_inatomic( | ||
293 | void __user *to, const void *from, unsigned long n); | ||
294 | |||
295 | static inline unsigned long __must_check | ||
296 | __copy_to_user(void __user *to, const void *from, unsigned long n) | ||
297 | { | ||
298 | might_fault(); | ||
299 | return __copy_to_user_inatomic(to, from, n); | ||
300 | } | ||
301 | |||
302 | static inline unsigned long __must_check | ||
303 | copy_to_user(void __user *to, const void *from, unsigned long n) | ||
304 | { | ||
305 | if (access_ok(VERIFY_WRITE, to, n)) | ||
306 | n = __copy_to_user(to, from, n); | ||
307 | return n; | ||
308 | } | ||
309 | |||
310 | /** | ||
311 | * __copy_from_user() - copy data from user space, with less checking. | ||
312 | * @to: Destination address, in kernel space. | ||
313 | * @from: Source address, in user space. | ||
314 | * @n: Number of bytes to copy. | ||
315 | * | ||
316 | * Context: User context only. This function may sleep. | ||
317 | * | ||
318 | * Copy data from user space to kernel space. Caller must check | ||
319 | * the specified block with access_ok() before calling this function. | ||
320 | * | ||
321 | * Returns number of bytes that could not be copied. | ||
322 | * On success, this will be zero. | ||
323 | * | ||
324 | * If some data could not be copied, this function will pad the copied | ||
325 | * data to the requested size using zero bytes. | ||
326 | * | ||
327 | * An alternate version - __copy_from_user_inatomic() - is designed | ||
328 | * to be called from atomic context, typically bracketed by calls | ||
329 | * to pagefault_disable() and pagefault_enable(). This version | ||
330 | * does *NOT* pad with zeros. | ||
331 | */ | ||
332 | extern unsigned long __must_check __copy_from_user_inatomic( | ||
333 | void *to, const void __user *from, unsigned long n); | ||
334 | extern unsigned long __must_check __copy_from_user_zeroing( | ||
335 | void *to, const void __user *from, unsigned long n); | ||
336 | |||
337 | static inline unsigned long __must_check | ||
338 | __copy_from_user(void *to, const void __user *from, unsigned long n) | ||
339 | { | ||
340 | might_fault(); | ||
341 | return __copy_from_user_zeroing(to, from, n); | ||
342 | } | ||
343 | |||
344 | static inline unsigned long __must_check | ||
345 | _copy_from_user(void *to, const void __user *from, unsigned long n) | ||
346 | { | ||
347 | if (access_ok(VERIFY_READ, from, n)) | ||
348 | n = __copy_from_user(to, from, n); | ||
349 | else | ||
350 | memset(to, 0, n); | ||
351 | return n; | ||
352 | } | ||
353 | |||
354 | #ifdef CONFIG_DEBUG_COPY_FROM_USER | ||
355 | extern void copy_from_user_overflow(void) | ||
356 | __compiletime_warning("copy_from_user() size is not provably correct"); | ||
357 | |||
358 | static inline unsigned long __must_check copy_from_user(void *to, | ||
359 | const void __user *from, | ||
360 | unsigned long n) | ||
361 | { | ||
362 | int sz = __compiletime_object_size(to); | ||
363 | |||
364 | if (likely(sz == -1 || sz >= n)) | ||
365 | n = _copy_from_user(to, from, n); | ||
366 | else | ||
367 | copy_from_user_overflow(); | ||
368 | |||
369 | return n; | ||
370 | } | ||
371 | #else | ||
372 | #define copy_from_user _copy_from_user | ||
373 | #endif | ||
374 | |||
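
A hedged sketch of the "bytes not copied" convention used by copy_to_user()/copy_from_user() (the struct and helper below are illustrative only): any nonzero return means that many trailing bytes were not transferred, and copy_from_user() zero-fills the untouched tail of the destination.

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	struct sample { int a, b; };	/* illustrative only */

	static int fetch_sample(struct sample *dst, const struct sample __user *src)
	{
		if (copy_from_user(dst, src, sizeof(*dst)))
			return -EFAULT;	/* partial copy; dst tail was zeroed */
		return 0;
	}
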
375 | #ifdef __tilegx__ | ||
376 | /** | ||
377 | * __copy_in_user() - copy data within user space, with less checking. | ||
378 | * @to: Destination address, in user space. | ||
379 | * @from: Source address, in kernel space. | ||
380 | * @n: Number of bytes to copy. | ||
381 | * | ||
382 | * Context: User context only. This function may sleep. | ||
383 | * | ||
384 | * Copy data from user space to user space. Caller must check | ||
385 | * the specified blocks with access_ok() before calling this function. | ||
386 | * | ||
387 | * Returns number of bytes that could not be copied. | ||
388 | * On success, this will be zero. | ||
389 | */ | ||
390 | extern unsigned long __copy_in_user_asm( | ||
391 | void __user *to, const void __user *from, unsigned long n); | ||
392 | |||
393 | static inline unsigned long __must_check | ||
394 | __copy_in_user(void __user *to, const void __user *from, unsigned long n) | ||
395 | { | ||
396 | might_sleep(); | ||
397 | return __copy_in_user_asm(to, from, n); | ||
398 | } | ||
399 | |||
400 | static inline unsigned long __must_check | ||
401 | copy_in_user(void __user *to, const void __user *from, unsigned long n) | ||
402 | { | ||
403 | if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n)) | ||
404 | n = __copy_in_user(to, from, n); | ||
405 | return n; | ||
406 | } | ||
407 | #endif | ||
408 | |||
409 | |||
410 | /** | ||
411 | * strlen_user: - Get the size of a string in user space. | ||
412 | * @str: The string to measure. | ||
413 | * | ||
414 | * Context: User context only. This function may sleep. | ||
415 | * | ||
416 | * Get the size of a NUL-terminated string in user space. | ||
417 | * | ||
418 | * Returns the size of the string INCLUDING the terminating NUL. | ||
419 | * On exception, returns 0. | ||
420 | * | ||
421 | * If there is a limit on the length of a valid string, you may wish to | ||
422 | * consider using strnlen_user() instead. | ||
423 | */ | ||
424 | extern long strnlen_user_asm(const char __user *str, long n); | ||
425 | static inline long __must_check strnlen_user(const char __user *str, long n) | ||
426 | { | ||
427 | might_fault(); | ||
428 | return strnlen_user_asm(str, n); | ||
429 | } | ||
430 | #define strlen_user(str) strnlen_user(str, LONG_MAX) | ||
431 | |||
432 | /** | ||
433 | * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. | ||
434 | * @dst: Destination address, in kernel space. This buffer must be at | ||
435 | * least @count bytes long. | ||
436 | * @src: Source address, in user space. | ||
437 | * @count: Maximum number of bytes to copy, including the trailing NUL. | ||
438 | * | ||
439 | * Copies a NUL-terminated string from userspace to kernel space. | ||
440 | * Caller must check the specified block with access_ok() before calling | ||
441 | * this function. | ||
442 | * | ||
443 | * On success, returns the length of the string (not including the trailing | ||
444 | * NUL). | ||
445 | * | ||
446 | * If access to userspace fails, returns -EFAULT (some data may have been | ||
447 | * copied). | ||
448 | * | ||
449 | * If @count is smaller than the length of the string, copies @count bytes | ||
450 | * and returns @count. | ||
451 | */ | ||
452 | extern long strncpy_from_user_asm(char *dst, const char __user *src, long); | ||
453 | static inline long __must_check __strncpy_from_user( | ||
454 | char *dst, const char __user *src, long count) | ||
455 | { | ||
456 | might_fault(); | ||
457 | return strncpy_from_user_asm(dst, src, count); | ||
458 | } | ||
459 | static inline long __must_check strncpy_from_user( | ||
460 | char *dst, const char __user *src, long count) | ||
461 | { | ||
462 | if (access_ok(VERIFY_READ, src, 1)) | ||
463 | return __strncpy_from_user(dst, src, count); | ||
464 | return -EFAULT; | ||
465 | } | ||
466 | |||
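
Because strncpy_from_user() returns @count without guaranteeing NUL termination when the user string is too long, callers typically terminate the buffer themselves. A hedged sketch with a hypothetical buffer size:

	#include <linux/uaccess.h>
	#include <linux/errno.h>

	#define NAME_LEN 32	/* hypothetical buffer size */

	static long get_name(char *dst, const char __user *src)
	{
		long len = strncpy_from_user(dst, src, NAME_LEN);

		if (len < 0)
			return len;		/* -EFAULT */
		dst[NAME_LEN - 1] = '\0';	/* ensure termination if truncated */
		return len;
	}
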
467 | /** | ||
468 | * clear_user: - Zero a block of memory in user space. | ||
469 | * @mem: Destination address, in user space. | ||
470 | * @len: Number of bytes to zero. | ||
471 | * | ||
472 | * Zero a block of memory in user space. | ||
473 | * | ||
474 | * Returns number of bytes that could not be cleared. | ||
475 | * On success, this will be zero. | ||
476 | */ | ||
477 | extern unsigned long clear_user_asm(void __user *mem, unsigned long len); | ||
478 | static inline unsigned long __must_check __clear_user( | ||
479 | void __user *mem, unsigned long len) | ||
480 | { | ||
481 | might_fault(); | ||
482 | return clear_user_asm(mem, len); | ||
483 | } | ||
484 | static inline unsigned long __must_check clear_user( | ||
485 | void __user *mem, unsigned long len) | ||
486 | { | ||
487 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
488 | return __clear_user(mem, len); | ||
489 | return len; | ||
490 | } | ||
491 | |||
492 | /** | ||
493 | * flush_user: - Flush a block of memory in user space from cache. | ||
494 | * @mem: Destination address, in user space. | ||
495 | * @len: Number of bytes to flush. | ||
496 | * | ||
497 | * Returns number of bytes that could not be flushed. | ||
498 | * On success, this will be zero. | ||
499 | */ | ||
500 | extern unsigned long flush_user_asm(void __user *mem, unsigned long len); | ||
501 | static inline unsigned long __must_check __flush_user( | ||
502 | void __user *mem, unsigned long len) | ||
503 | { | ||
504 | int retval; | ||
505 | |||
506 | might_fault(); | ||
507 | retval = flush_user_asm(mem, len); | ||
508 | mb_incoherent(); | ||
509 | return retval; | ||
510 | } | ||
511 | |||
512 | static inline unsigned long __must_check flush_user( | ||
513 | void __user *mem, unsigned long len) | ||
514 | { | ||
515 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
516 | return __flush_user(mem, len); | ||
517 | return len; | ||
518 | } | ||
519 | |||
520 | /** | ||
521 | * inv_user: - Invalidate a block of memory in user space from cache. | ||
522 | * @mem: Destination address, in user space. | ||
523 | * @len: Number of bytes to invalidate. | ||
524 | * | ||
525 | * Returns number of bytes that could not be invalidated. | ||
526 | * On success, this will be zero. | ||
527 | * | ||
528 | * Note that on TILE64, the "inv" operation is in fact a | ||
529 | * "flush and invalidate", so cache write-backs will occur prior | ||
530 | * to the cache being marked invalid. | ||
531 | */ | ||
532 | extern unsigned long inv_user_asm(void __user *mem, unsigned long len); | ||
533 | static inline unsigned long __must_check __inv_user( | ||
534 | void __user *mem, unsigned long len) | ||
535 | { | ||
536 | int retval; | ||
537 | |||
538 | might_fault(); | ||
539 | retval = inv_user_asm(mem, len); | ||
540 | mb_incoherent(); | ||
541 | return retval; | ||
542 | } | ||
543 | static inline unsigned long __must_check inv_user( | ||
544 | void __user *mem, unsigned long len) | ||
545 | { | ||
546 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
547 | return __inv_user(mem, len); | ||
548 | return len; | ||
549 | } | ||
550 | |||
551 | /** | ||
552 | * finv_user: - Flush-inval a block of memory in user space from cache. | ||
553 | * @mem: Destination address, in user space. | ||
554 | * @len: Number of bytes to invalidate. | ||
555 | * | ||
556 | * Returns number of bytes that could not be flush-invalidated. | ||
557 | * On success, this will be zero. | ||
558 | */ | ||
559 | extern unsigned long finv_user_asm(void __user *mem, unsigned long len); | ||
560 | static inline unsigned long __must_check __finv_user( | ||
561 | void __user *mem, unsigned long len) | ||
562 | { | ||
563 | int retval; | ||
564 | |||
565 | might_fault(); | ||
566 | retval = finv_user_asm(mem, len); | ||
567 | mb_incoherent(); | ||
568 | return retval; | ||
569 | } | ||
570 | static inline unsigned long __must_check finv_user( | ||
571 | void __user *mem, unsigned long len) | ||
572 | { | ||
573 | if (access_ok(VERIFY_WRITE, mem, len)) | ||
574 | return __finv_user(mem, len); | ||
575 | return len; | ||
576 | } | ||
577 | |||
578 | #endif /* _ASM_TILE_UACCESS_H */ | ||
diff --git a/arch/tile/include/asm/ucontext.h b/arch/tile/include/asm/ucontext.h new file mode 100644 index 000000000000..9bc07b9f30fb --- /dev/null +++ b/arch/tile/include/asm/ucontext.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/ucontext.h> | |||
diff --git a/arch/tile/include/asm/unaligned.h b/arch/tile/include/asm/unaligned.h new file mode 100644 index 000000000000..137e2de5b102 --- /dev/null +++ b/arch/tile/include/asm/unaligned.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #ifndef _ASM_TILE_UNALIGNED_H | ||
16 | #define _ASM_TILE_UNALIGNED_H | ||
17 | |||
18 | #include <linux/unaligned/le_struct.h> | ||
19 | #include <linux/unaligned/be_byteshift.h> | ||
20 | #include <linux/unaligned/generic.h> | ||
21 | #define get_unaligned __get_unaligned_le | ||
22 | #define put_unaligned __put_unaligned_le | ||
23 | |||
24 | #endif /* _ASM_TILE_UNALIGNED_H */ | ||
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h new file mode 100644 index 000000000000..03b3d5d665dd --- /dev/null +++ b/arch/tile/include/asm/unistd.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #if !defined(_ASM_TILE_UNISTD_H) || defined(__SYSCALL) | ||
16 | #define _ASM_TILE_UNISTD_H | ||
17 | |||
18 | |||
19 | #ifndef __LP64__ | ||
20 | /* Use the flavor of this syscall that matches the 32-bit API better. */ | ||
21 | #define __ARCH_WANT_SYNC_FILE_RANGE2 | ||
22 | #endif | ||
23 | |||
24 | /* Use the standard ABI for syscalls. */ | ||
25 | #include <asm-generic/unistd.h> | ||
26 | |||
27 | #ifndef __tilegx__ | ||
28 | /* "Fast" syscalls provide atomic support for 32-bit chips. */ | ||
29 | #define __NR_FAST_cmpxchg -1 | ||
30 | #define __NR_FAST_atomic_update -2 | ||
31 | #define __NR_FAST_cmpxchg64 -3 | ||
32 | #define __NR_cmpxchg_badaddr (__NR_arch_specific_syscall + 0) | ||
33 | __SYSCALL(__NR_cmpxchg_badaddr, sys_cmpxchg_badaddr) | ||
34 | #endif | ||
35 | |||
36 | /* Additional Tilera-specific syscalls. */ | ||
37 | #define __NR_flush_cache (__NR_arch_specific_syscall + 1) | ||
38 | __SYSCALL(__NR_flush_cache, sys_flush_cache) | ||
39 | |||
40 | #ifdef __KERNEL__ | ||
41 | /* In compat mode, we use sys_llseek() for compat_sys_llseek(). */ | ||
42 | #ifdef CONFIG_COMPAT | ||
43 | #define __ARCH_WANT_SYS_LLSEEK | ||
44 | #endif | ||
45 | #endif | ||
46 | |||
47 | #endif /* _ASM_TILE_UNISTD_H */ | ||
diff --git a/arch/tile/include/asm/user.h b/arch/tile/include/asm/user.h new file mode 100644 index 000000000000..cbc8b4d5a5ce --- /dev/null +++ b/arch/tile/include/asm/user.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _ASM_TILE_USER_H | ||
17 | #define _ASM_TILE_USER_H | ||
18 | |||
19 | /* This header is for a.out file formats, which TILE does not support. */ | ||
20 | |||
21 | #endif /* _ASM_TILE_USER_H */ | ||
diff --git a/arch/tile/include/asm/xor.h b/arch/tile/include/asm/xor.h new file mode 100644 index 000000000000..c82eb12a5b18 --- /dev/null +++ b/arch/tile/include/asm/xor.h | |||
@@ -0,0 +1 @@ | |||
#include <asm-generic/xor.h> | |||
diff --git a/arch/tile/include/hv/drv_pcie_rc_intf.h b/arch/tile/include/hv/drv_pcie_rc_intf.h new file mode 100644 index 000000000000..9bd2243bece0 --- /dev/null +++ b/arch/tile/include/hv/drv_pcie_rc_intf.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file drv_pcie_rc_intf.h | ||
17 | * Interface definitions for the PCIE Root Complex. | ||
18 | */ | ||
19 | |||
20 | #ifndef _SYS_HV_DRV_PCIE_RC_INTF_H | ||
21 | #define _SYS_HV_DRV_PCIE_RC_INTF_H | ||
22 | |||
23 | /** File offset for reading the interrupt base number used for PCIE legacy | ||
24 | interrupts and PLX Gen 1 requirement flag */ | ||
25 | #define PCIE_RC_CONFIG_MASK_OFF 0 | ||
26 | |||
27 | |||
28 | /** | ||
29 | * Structure used for obtaining PCIe config information, read from the PCIE | ||
30 | * subsystem /ctl file at initialization | ||
31 | */ | ||
32 | typedef struct pcie_rc_config | ||
33 | { | ||
34 | int intr; /**< interrupt number used for downcall */ | ||
35 | int plx_gen1; /**< flag for PLX Gen 1 configuration */ | ||
36 | } pcie_rc_config_t; | ||
37 | |||
38 | #endif /* _SYS_HV_DRV_PCIE_RC_INTF_H */ | ||
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h new file mode 100644 index 000000000000..84b31551080a --- /dev/null +++ b/arch/tile/include/hv/hypervisor.h | |||
@@ -0,0 +1,2366 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file hypervisor.h | ||
17 | * The hypervisor's public API. | ||
18 | */ | ||
19 | |||
20 | #ifndef _TILE_HV_H | ||
21 | #define _TILE_HV_H | ||
22 | |||
23 | #ifdef __tile__ | ||
24 | #include <arch/chip.h> | ||
25 | #else | ||
26 | /* HACK: Allow use by "tools/cpack/". */ | ||
27 | #include "install/include/arch/chip.h" | ||
28 | #endif | ||
29 | |||
30 | /* Linux builds want unsigned long constants, but assembler wants numbers */ | ||
31 | #ifdef __ASSEMBLER__ | ||
32 | /** One, for assembler */ | ||
33 | #define __HV_SIZE_ONE 1 | ||
34 | #elif !defined(__tile__) && CHIP_VA_WIDTH() > 32 | ||
35 | /** One, for 64-bit on host */ | ||
36 | #define __HV_SIZE_ONE 1ULL | ||
37 | #else | ||
38 | /** One, for Linux */ | ||
39 | #define __HV_SIZE_ONE 1UL | ||
40 | #endif | ||
41 | |||
42 | |||
43 | /** The log2 of the span of a level-1 page table, in bytes. | ||
44 | */ | ||
45 | #define HV_LOG2_L1_SPAN 32 | ||
46 | |||
47 | /** The span of a level-1 page table, in bytes. | ||
48 | */ | ||
49 | #define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN) | ||
50 | |||
51 | /** The log2 of the size of small pages, in bytes. This value should | ||
52 | * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). | ||
53 | */ | ||
54 | #define HV_LOG2_PAGE_SIZE_SMALL 16 | ||
55 | |||
56 | /** The size of small pages, in bytes. This value should be verified | ||
57 | * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). | ||
58 | */ | ||
59 | #define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL) | ||
60 | |||
61 | /** The log2 of the size of large pages, in bytes. This value should be | ||
62 | * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). | ||
63 | */ | ||
64 | #define HV_LOG2_PAGE_SIZE_LARGE 24 | ||
65 | |||
66 | /** The size of large pages, in bytes. This value should be verified | ||
67 | * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). | ||
68 | */ | ||
69 | #define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE) | ||
70 | |||
71 | /** The log2 of the granularity at which page tables must be aligned; | ||
72 | * in other words, the CPA for a page table must have this many zero | ||
73 | * bits at the bottom of the address. | ||
74 | */ | ||
75 | #define HV_LOG2_PAGE_TABLE_ALIGN 11 | ||
76 | |||
77 | /** The granularity at which page tables must be aligned. | ||
78 | */ | ||
79 | #define HV_PAGE_TABLE_ALIGN (__HV_SIZE_ONE << HV_LOG2_PAGE_TABLE_ALIGN) | ||
80 | |||
81 | /** Normal start of hypervisor glue in client physical memory. */ | ||
82 | #define HV_GLUE_START_CPA 0x10000 | ||
83 | |||
84 | /** This much space is reserved at HV_GLUE_START_CPA | ||
85 | * for the hypervisor glue. The client program must start at | ||
86 | * some address higher than this, and in particular the address of | ||
87 | * its text section should be equal to zero modulo HV_PAGE_SIZE_LARGE | ||
88 | * so that relative offsets to the HV glue are correct. | ||
89 | */ | ||
90 | #define HV_GLUE_RESERVED_SIZE 0x10000 | ||
91 | |||
92 | /** Each entry in the hv dispatch array takes this many bytes. */ | ||
93 | #define HV_DISPATCH_ENTRY_SIZE 32 | ||
94 | |||
95 | /** Version of the hypervisor interface defined by this file */ | ||
96 | #define _HV_VERSION 10 | ||
97 | |||
98 | /* Index into hypervisor interface dispatch code blocks. | ||
99 | * | ||
100 | * Hypervisor calls are invoked from user space by calling code | ||
101 | * at an address HV_BASE_ADDRESS + (index) * HV_DISPATCH_ENTRY_SIZE, | ||
102 | * where index is one of these enum values. | ||
103 | * | ||
104 | * Normally a supervisor is expected to produce a set of symbols | ||
105 | * starting at HV_BASE_ADDRESS that obey this convention, but a user | ||
106 | * program could call directly through function pointers if desired. | ||
107 | * | ||
108 | * These numbers are part of the binary API and will not be changed | ||
109 | * without updating HV_VERSION, which should be a rare event. | ||
110 | */ | ||
111 | |||
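
A hedged sketch of the dispatch convention described above (HV_BASE_ADDRESS is assumed to come from the supervisor's glue symbols; it is not defined in this header, and the dispatch indices such as HV_DISPATCH_SYSCONF are defined just below): the entry point for an hv_ call is simply a fixed offset into the glue area.

	/* Sketch only: call hv_sysconf() through a computed glue address. */
	typedef long (*hv_sysconf_fn)(int query);

	static long call_sysconf_via_glue(unsigned long hv_base_address, int query)
	{
		unsigned long addr = hv_base_address +
				     HV_DISPATCH_SYSCONF * HV_DISPATCH_ENTRY_SIZE;

		return ((hv_sysconf_fn)addr)(query);
	}
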
112 | /** reserved. */ | ||
113 | #define _HV_DISPATCH_RESERVED 0 | ||
114 | |||
115 | /** hv_init */ | ||
116 | #define HV_DISPATCH_INIT 1 | ||
117 | |||
118 | /** hv_install_context */ | ||
119 | #define HV_DISPATCH_INSTALL_CONTEXT 2 | ||
120 | |||
121 | /** hv_sysconf */ | ||
122 | #define HV_DISPATCH_SYSCONF 3 | ||
123 | |||
124 | /** hv_get_rtc */ | ||
125 | #define HV_DISPATCH_GET_RTC 4 | ||
126 | |||
127 | /** hv_set_rtc */ | ||
128 | #define HV_DISPATCH_SET_RTC 5 | ||
129 | |||
130 | /** hv_flush_asid */ | ||
131 | #define HV_DISPATCH_FLUSH_ASID 6 | ||
132 | |||
133 | /** hv_flush_page */ | ||
134 | #define HV_DISPATCH_FLUSH_PAGE 7 | ||
135 | |||
136 | /** hv_flush_pages */ | ||
137 | #define HV_DISPATCH_FLUSH_PAGES 8 | ||
138 | |||
139 | /** hv_restart */ | ||
140 | #define HV_DISPATCH_RESTART 9 | ||
141 | |||
142 | /** hv_halt */ | ||
143 | #define HV_DISPATCH_HALT 10 | ||
144 | |||
145 | /** hv_power_off */ | ||
146 | #define HV_DISPATCH_POWER_OFF 11 | ||
147 | |||
148 | /** hv_inquire_physical */ | ||
149 | #define HV_DISPATCH_INQUIRE_PHYSICAL 12 | ||
150 | |||
151 | /** hv_inquire_memory_controller */ | ||
152 | #define HV_DISPATCH_INQUIRE_MEMORY_CONTROLLER 13 | ||
153 | |||
154 | /** hv_inquire_virtual */ | ||
155 | #define HV_DISPATCH_INQUIRE_VIRTUAL 14 | ||
156 | |||
157 | /** hv_inquire_asid */ | ||
158 | #define HV_DISPATCH_INQUIRE_ASID 15 | ||
159 | |||
160 | /** hv_nanosleep */ | ||
161 | #define HV_DISPATCH_NANOSLEEP 16 | ||
162 | |||
163 | /** hv_console_read_if_ready */ | ||
164 | #define HV_DISPATCH_CONSOLE_READ_IF_READY 17 | ||
165 | |||
166 | /** hv_console_write */ | ||
167 | #define HV_DISPATCH_CONSOLE_WRITE 18 | ||
168 | |||
169 | /** hv_downcall_dispatch */ | ||
170 | #define HV_DISPATCH_DOWNCALL_DISPATCH 19 | ||
171 | |||
172 | /** hv_inquire_topology */ | ||
173 | #define HV_DISPATCH_INQUIRE_TOPOLOGY 20 | ||
174 | |||
175 | /** hv_fs_findfile */ | ||
176 | #define HV_DISPATCH_FS_FINDFILE 21 | ||
177 | |||
178 | /** hv_fs_fstat */ | ||
179 | #define HV_DISPATCH_FS_FSTAT 22 | ||
180 | |||
181 | /** hv_fs_pread */ | ||
182 | #define HV_DISPATCH_FS_PREAD 23 | ||
183 | |||
184 | /** hv_physaddr_read64 */ | ||
185 | #define HV_DISPATCH_PHYSADDR_READ64 24 | ||
186 | |||
187 | /** hv_physaddr_write64 */ | ||
188 | #define HV_DISPATCH_PHYSADDR_WRITE64 25 | ||
189 | |||
190 | /** hv_get_command_line */ | ||
191 | #define HV_DISPATCH_GET_COMMAND_LINE 26 | ||
192 | |||
193 | /** hv_set_caching */ | ||
194 | #define HV_DISPATCH_SET_CACHING 27 | ||
195 | |||
196 | /** hv_bzero_page */ | ||
197 | #define HV_DISPATCH_BZERO_PAGE 28 | ||
198 | |||
199 | /** hv_register_message_state */ | ||
200 | #define HV_DISPATCH_REGISTER_MESSAGE_STATE 29 | ||
201 | |||
202 | /** hv_send_message */ | ||
203 | #define HV_DISPATCH_SEND_MESSAGE 30 | ||
204 | |||
205 | /** hv_receive_message */ | ||
206 | #define HV_DISPATCH_RECEIVE_MESSAGE 31 | ||
207 | |||
208 | /** hv_inquire_context */ | ||
209 | #define HV_DISPATCH_INQUIRE_CONTEXT 32 | ||
210 | |||
211 | /** hv_start_all_tiles */ | ||
212 | #define HV_DISPATCH_START_ALL_TILES 33 | ||
213 | |||
214 | /** hv_dev_open */ | ||
215 | #define HV_DISPATCH_DEV_OPEN 34 | ||
216 | |||
217 | /** hv_dev_close */ | ||
218 | #define HV_DISPATCH_DEV_CLOSE 35 | ||
219 | |||
220 | /** hv_dev_pread */ | ||
221 | #define HV_DISPATCH_DEV_PREAD 36 | ||
222 | |||
223 | /** hv_dev_pwrite */ | ||
224 | #define HV_DISPATCH_DEV_PWRITE 37 | ||
225 | |||
226 | /** hv_dev_poll */ | ||
227 | #define HV_DISPATCH_DEV_POLL 38 | ||
228 | |||
229 | /** hv_dev_poll_cancel */ | ||
230 | #define HV_DISPATCH_DEV_POLL_CANCEL 39 | ||
231 | |||
232 | /** hv_dev_preada */ | ||
233 | #define HV_DISPATCH_DEV_PREADA 40 | ||
234 | |||
235 | /** hv_dev_pwritea */ | ||
236 | #define HV_DISPATCH_DEV_PWRITEA 41 | ||
237 | |||
238 | /** hv_flush_remote */ | ||
239 | #define HV_DISPATCH_FLUSH_REMOTE 42 | ||
240 | |||
241 | /** hv_console_putc */ | ||
242 | #define HV_DISPATCH_CONSOLE_PUTC 43 | ||
243 | |||
244 | /** hv_inquire_tiles */ | ||
245 | #define HV_DISPATCH_INQUIRE_TILES 44 | ||
246 | |||
247 | /** hv_confstr */ | ||
248 | #define HV_DISPATCH_CONFSTR 45 | ||
249 | |||
250 | /** hv_reexec */ | ||
251 | #define HV_DISPATCH_REEXEC 46 | ||
252 | |||
253 | /** hv_set_command_line */ | ||
254 | #define HV_DISPATCH_SET_COMMAND_LINE 47 | ||
255 | |||
256 | /** hv_dev_register_intr_state */ | ||
257 | #define HV_DISPATCH_DEV_REGISTER_INTR_STATE 48 | ||
258 | |||
259 | /** hv_enable_intr */ | ||
260 | #define HV_DISPATCH_ENABLE_INTR 49 | ||
261 | |||
262 | /** hv_disable_intr */ | ||
263 | #define HV_DISPATCH_DISABLE_INTR 50 | ||
264 | |||
265 | /** hv_trigger_ipi */ | ||
266 | #define HV_DISPATCH_TRIGGER_IPI 51 | ||
267 | |||
268 | /** hv_store_mapping */ | ||
269 | #define HV_DISPATCH_STORE_MAPPING 52 | ||
270 | |||
271 | /** hv_inquire_realpa */ | ||
272 | #define HV_DISPATCH_INQUIRE_REALPA 53 | ||
273 | |||
274 | /** hv_flush_all */ | ||
275 | #define HV_DISPATCH_FLUSH_ALL 54 | ||
276 | |||
277 | /** One more than the largest dispatch value */ | ||
278 | #define _HV_DISPATCH_END 55 | ||
279 | |||
280 | |||
281 | #ifndef __ASSEMBLER__ | ||
282 | |||
283 | #ifdef __KERNEL__ | ||
284 | #include <asm/types.h> | ||
285 | typedef u32 __hv32; /**< 32-bit value */ | ||
286 | typedef u64 __hv64; /**< 64-bit value */ | ||
287 | #else | ||
288 | #include <stdint.h> | ||
289 | typedef uint32_t __hv32; /**< 32-bit value */ | ||
290 | typedef uint64_t __hv64; /**< 64-bit value */ | ||
291 | #endif | ||
292 | |||
293 | |||
294 | /** Hypervisor physical address. */ | ||
295 | typedef __hv64 HV_PhysAddr; | ||
296 | |||
297 | #if CHIP_VA_WIDTH() > 32 | ||
298 | /** Hypervisor virtual address. */ | ||
299 | typedef __hv64 HV_VirtAddr; | ||
300 | #else | ||
301 | /** Hypervisor virtual address. */ | ||
302 | typedef __hv32 HV_VirtAddr; | ||
303 | #endif /* CHIP_VA_WIDTH() > 32 */ | ||
304 | |||
305 | /** Hypervisor ASID. */ | ||
306 | typedef unsigned int HV_ASID; | ||
307 | |||
308 | /** Hypervisor tile location for a memory access | ||
309 | * ("location overridden target"). | ||
310 | */ | ||
311 | typedef unsigned int HV_LOTAR; | ||
312 | |||
313 | /** Hypervisor size of a page. */ | ||
314 | typedef unsigned long HV_PageSize; | ||
315 | |||
316 | /** A page table entry. | ||
317 | */ | ||
318 | typedef struct | ||
319 | { | ||
320 | __hv64 val; /**< Value of PTE */ | ||
321 | } HV_PTE; | ||
322 | |||
323 | /** Hypervisor error code. */ | ||
324 | typedef int HV_Errno; | ||
325 | |||
326 | #endif /* !__ASSEMBLER__ */ | ||
327 | |||
328 | #define HV_OK 0 /**< No error */ | ||
329 | #define HV_EINVAL -801 /**< Invalid argument */ | ||
330 | #define HV_ENODEV -802 /**< No such device */ | ||
331 | #define HV_ENOENT -803 /**< No such file or directory */ | ||
332 | #define HV_EBADF -804 /**< Bad file number */ | ||
333 | #define HV_EFAULT -805 /**< Bad address */ | ||
334 | #define HV_ERECIP -806 /**< Bad recipients */ | ||
335 | #define HV_E2BIG -807 /**< Message too big */ | ||
336 | #define HV_ENOTSUP -808 /**< Service not supported */ | ||
337 | #define HV_EBUSY -809 /**< Device busy */ | ||
338 | #define HV_ENOSYS -810 /**< Invalid syscall */ | ||
339 | #define HV_EPERM -811 /**< No permission */ | ||
340 | #define HV_ENOTREADY -812 /**< Device not ready */ | ||
341 | #define HV_EIO -813 /**< I/O error */ | ||
342 | #define HV_ENOMEM -814 /**< Out of memory */ | ||
343 | |||
344 | #define HV_ERR_MAX -801 /**< Largest HV error code */ | ||
345 | #define HV_ERR_MIN -814 /**< Smallest HV error code */ | ||
346 | |||
347 | #ifndef __ASSEMBLER__ | ||
348 | |||
349 | /** Pass HV_VERSION to hv_init to request this version of the interface. */ | ||
350 | typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber; | ||
351 | |||
352 | /** Initializes the hypervisor. | ||
353 | * | ||
354 | * @param interface_version_number The version of the hypervisor interface | ||
355 | * that this program expects, typically HV_VERSION. | ||
356 | * @param chip_num Architecture number of the chip the client was built for. | ||
357 | * @param chip_rev_num Revision number of the chip the client was built for. | ||
358 | */ | ||
359 | void hv_init(HV_VersionNumber interface_version_number, | ||
360 | int chip_num, int chip_rev_num); | ||
361 | |||
362 | |||
363 | /** Queries we can make for hv_sysconf(). | ||
364 | * | ||
365 | * These numbers are part of the binary API and guaranteed not to change. | ||
366 | */ | ||
367 | typedef enum { | ||
368 | /** An invalid value; do not use. */ | ||
369 | _HV_SYSCONF_RESERVED = 0, | ||
370 | |||
371 | /** The length of the glue section containing the hv_ procs, in bytes. */ | ||
372 | HV_SYSCONF_GLUE_SIZE = 1, | ||
373 | |||
374 | /** The size of small pages, in bytes. */ | ||
375 | HV_SYSCONF_PAGE_SIZE_SMALL = 2, | ||
376 | |||
377 | /** The size of large pages, in bytes. */ | ||
378 | HV_SYSCONF_PAGE_SIZE_LARGE = 3, | ||
379 | |||
380 | /** Processor clock speed, in hertz. */ | ||
381 | HV_SYSCONF_CPU_SPEED = 4, | ||
382 | |||
383 | /** Processor temperature, in degrees Kelvin. The value | ||
384 | * HV_SYSCONF_TEMP_KTOC may be subtracted from this to get degrees | ||
385 | * Celsius. If that Celsius value is HV_SYSCONF_OVERTEMP, this indicates | ||
386 | * that the temperature has hit an upper limit and is no longer being | ||
387 | * accurately tracked. | ||
388 | */ | ||
389 | HV_SYSCONF_CPU_TEMP = 5, | ||
390 | |||
391 | /** Board temperature, in degrees Kelvin. The value | ||
392 | * HV_SYSCONF_TEMP_KTOC may be subtracted from this to get degrees | ||
393 | * Celsius. If that Celsius value is HV_SYSCONF_OVERTEMP, this indicates | ||
394 | * that the temperature has hit an upper limit and is no longer being | ||
395 | * accurately tracked. | ||
396 | */ | ||
397 | HV_SYSCONF_BOARD_TEMP = 6 | ||
398 | |||
399 | } HV_SysconfQuery; | ||
400 | |||
401 | /** Offset to subtract from returned Kelvin temperature to get degrees | ||
402 | Celsius. */ | ||
403 | #define HV_SYSCONF_TEMP_KTOC 273 | ||
404 | |||
405 | /** Pseudo-temperature value indicating that the temperature has | ||
406 | * pegged at its upper limit and is no longer accurate; note that this is | ||
407 | * the value after subtracting HV_SYSCONF_TEMP_KTOC. */ | ||
408 | #define HV_SYSCONF_OVERTEMP 999 | ||
409 | |||
410 | /** Query a configuration value from the hypervisor. | ||
411 | * @param query Which value is requested (HV_SYSCONF_xxx). | ||
412 | * @return The requested value, or -1 if the requested value is illegal or | ||
413 | * unavailable. | ||
414 | */ | ||
415 | long hv_sysconf(HV_SysconfQuery query); | ||
416 | |||
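
A hedged sketch of the temperature query and the HV_SYSCONF_TEMP_KTOC / HV_SYSCONF_OVERTEMP conventions documented below (the helper itself is illustrative):

	/* Returns CPU temperature in degrees Celsius, or -1 if unavailable or pegged. */
	static long cpu_temp_celsius(void)
	{
		long kelvin = hv_sysconf(HV_SYSCONF_CPU_TEMP);
		long celsius;

		if (kelvin < 0)
			return -1;		/* query failed */
		celsius = kelvin - HV_SYSCONF_TEMP_KTOC;
		if (celsius == HV_SYSCONF_OVERTEMP)
			return -1;		/* sensor pegged; value not accurate */
		return celsius;
	}
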
417 | |||
418 | /** Queries we can make for hv_confstr(). | ||
419 | * | ||
420 | * These numbers are part of the binary API and guaranteed not to change. | ||
421 | */ | ||
422 | typedef enum { | ||
423 | /** An invalid value; do not use. */ | ||
424 | _HV_CONFSTR_RESERVED = 0, | ||
425 | |||
426 | /** Board part number. */ | ||
427 | HV_CONFSTR_BOARD_PART_NUM = 1, | ||
428 | |||
429 | /** Board serial number. */ | ||
430 | HV_CONFSTR_BOARD_SERIAL_NUM = 2, | ||
431 | |||
432 | /** Chip serial number. */ | ||
433 | HV_CONFSTR_CHIP_SERIAL_NUM = 3, | ||
434 | |||
435 | /** Board revision level. */ | ||
436 | HV_CONFSTR_BOARD_REV = 4, | ||
437 | |||
438 | /** Hypervisor software version. */ | ||
439 | HV_CONFSTR_HV_SW_VER = 5, | ||
440 | |||
441 | /** The name for this chip model. */ | ||
442 | HV_CONFSTR_CHIP_MODEL = 6, | ||
443 | |||
444 | /** Human-readable board description. */ | ||
445 | HV_CONFSTR_BOARD_DESC = 7, | ||
446 | |||
447 | /** Human-readable description of the hypervisor configuration. */ | ||
448 | HV_CONFSTR_HV_CONFIG = 8, | ||
449 | |||
450 | /** Human-readable version string for the boot image (for instance, | ||
451 | * who built it and when, what configuration file was used). */ | ||
452 | HV_CONFSTR_HV_CONFIG_VER = 9, | ||
453 | |||
454 | /** Mezzanine part number. */ | ||
455 | HV_CONFSTR_MEZZ_PART_NUM = 10, | ||
456 | |||
457 | /** Mezzanine serial number. */ | ||
458 | HV_CONFSTR_MEZZ_SERIAL_NUM = 11, | ||
459 | |||
460 | /** Mezzanine revision level. */ | ||
461 | HV_CONFSTR_MEZZ_REV = 12, | ||
462 | |||
463 | /** Human-readable mezzanine description. */ | ||
464 | HV_CONFSTR_MEZZ_DESC = 13, | ||
465 | |||
466 | /** Control path for the onboard network switch. */ | ||
467 | HV_CONFSTR_SWITCH_CONTROL = 14, | ||
468 | |||
469 | /** Chip revision level. */ | ||
470 | HV_CONFSTR_CHIP_REV = 15 | ||
471 | |||
472 | } HV_ConfstrQuery; | ||
473 | |||
474 | /** Query a configuration string from the hypervisor. | ||
475 | * | ||
476 | * @param query Identifier for the specific string to be retrieved | ||
477 | * (HV_CONFSTR_xxx). | ||
478 | * @param buf Buffer in which to place the string. | ||
479 | * @param len Length of the buffer. | ||
480 | * @return If query is valid, then the length of the corresponding string, | ||
481 | * including the trailing null; if this is greater than len, the string | ||
482 | * was truncated. If query is invalid, HV_EINVAL. If the specified | ||
483 | * buffer is not writable by the client, HV_EFAULT. | ||
484 | */ | ||
485 | int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len); | ||
486 | |||
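
A hedged usage sketch of the hv_confstr() return convention (the helper is illustrative): a return value larger than len signals truncation, and negative values are HV_Errno codes.

	static int read_chip_model(char *buf, int len)
	{
		int rc = hv_confstr(HV_CONFSTR_CHIP_MODEL,
				    (HV_VirtAddr)(unsigned long)buf, len);

		if (rc < 0)
			return rc;		/* HV_EINVAL or HV_EFAULT */
		if (rc > len)
			buf[len - 1] = '\0';	/* truncated; force termination */
		return rc;
	}
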
487 | /** State object used to enable and disable one-shot and level-sensitive | ||
488 | * interrupts. */ | ||
489 | typedef struct | ||
490 | { | ||
491 | #if CHIP_VA_WIDTH() > 32 | ||
492 | __hv64 opaque[2]; /**< No user-serviceable parts inside */ | ||
493 | #else | ||
494 | __hv32 opaque[2]; /**< No user-serviceable parts inside */ | ||
495 | #endif | ||
496 | } | ||
497 | HV_IntrState; | ||
498 | |||
499 | /** A set of interrupts. */ | ||
500 | typedef __hv32 HV_IntrMask; | ||
501 | |||
502 | /** Tile coordinate */ | ||
503 | typedef struct | ||
504 | { | ||
505 | /** X coordinate, relative to supervisor's top-left coordinate */ | ||
506 | int x; | ||
507 | |||
508 | /** Y coordinate, relative to supervisor's top-left coordinate */ | ||
509 | int y; | ||
510 | } HV_Coord; | ||
511 | |||
512 | /** The low interrupt numbers are reserved for use by the client in | ||
513 | * delivering IPIs. Any interrupt numbers higher than this value are | ||
514 | * reserved for use by HV device drivers. */ | ||
515 | #define HV_MAX_IPI_INTERRUPT 7 | ||
516 | |||
517 | /** Register an interrupt state object. This object is used to enable and | ||
518 | * disable one-shot and level-sensitive interrupts. Once the state is | ||
519 | * registered, the client must not read or write the state object; doing | ||
520 | * so will cause undefined results. | ||
521 | * | ||
522 | * @param intr_state Pointer to interrupt state object. | ||
523 | * @return HV_OK on success, or a hypervisor error code. | ||
524 | */ | ||
525 | HV_Errno hv_dev_register_intr_state(HV_IntrState* intr_state); | ||
526 | |||
527 | /** Enable a set of one-shot and level-sensitive interrupts. | ||
528 | * | ||
529 | * @param intr_state Pointer to interrupt state object. | ||
530 | * @param enab_mask Bitmap of interrupts to enable. | ||
531 | */ | ||
532 | void hv_enable_intr(HV_IntrState* intr_state, HV_IntrMask enab_mask); | ||
533 | |||
534 | /** Disable a set of one-shot and level-sensitive interrupts. | ||
535 | * | ||
536 | * @param intr_state Pointer to interrupt state object. | ||
537 | * @param disab_mask Bitmap of interrupts to disable. | ||
538 | */ | ||
539 | void hv_disable_intr(HV_IntrState* intr_state, HV_IntrMask disab_mask); | ||
540 | |||
541 | /** Trigger a one-shot interrupt on some tile | ||
542 | * | ||
543 | * @param tile Which tile to interrupt. | ||
544 | * @param interrupt Interrupt number to trigger; must be between 0 and | ||
545 | * HV_MAX_IPI_INTERRUPT. | ||
546 | * @return HV_OK on success, or a hypervisor error code. | ||
547 | */ | ||
548 | HV_Errno hv_trigger_ipi(HV_Coord tile, int interrupt); | ||
549 | |||
550 | /** Store a memory mapping in debug memory so that an external debugger can read it. | ||
551 | * A maximum of 16 entries can be stored. | ||
552 | * | ||
553 | * @param va VA of memory that is mapped. | ||
554 | * @param len Length of mapped memory. | ||
555 | * @param pa PA of memory that is mapped. | ||
556 | * @return 0 on success, -1 if the maximum number of mappings is exceeded. | ||
557 | */ | ||
558 | int hv_store_mapping(HV_VirtAddr va, unsigned int len, HV_PhysAddr pa); | ||
559 | |||
560 | /** Given a client PA and a length, return its real (HV) PA. | ||
561 | * | ||
562 | * @param cpa Client physical address. | ||
563 | * @param len Length of mapped memory. | ||
564 | * @return physical address, or -1 if cpa or len is not valid. | ||
565 | */ | ||
566 | HV_PhysAddr hv_inquire_realpa(HV_PhysAddr cpa, unsigned int len); | ||
567 | |||
568 | /** RTC return flag for no RTC chip present. | ||
569 | */ | ||
570 | #define HV_RTC_NO_CHIP 0x1 | ||
571 | |||
572 | /** RTC return flag for low-voltage condition, indicating that the battery | ||
573 | * has died and the time read is unreliable. | ||
574 | */ | ||
575 | #define HV_RTC_LOW_VOLTAGE 0x2 | ||
576 | |||
577 | /** Date/Time of day */ | ||
578 | typedef struct { | ||
579 | #if CHIP_WORD_SIZE() > 32 | ||
580 | __hv64 tm_sec; /**< Seconds, 0-59 */ | ||
581 | __hv64 tm_min; /**< Minutes, 0-59 */ | ||
582 | __hv64 tm_hour; /**< Hours, 0-23 */ | ||
583 | __hv64 tm_mday; /**< Day of month, 0-30 */ | ||
584 | __hv64 tm_mon; /**< Month, 0-11 */ | ||
585 | __hv64 tm_year; /**< Years since 1900, 0-199 */ | ||
586 | __hv64 flags; /**< Return flags, 0 if no error */ | ||
587 | #else | ||
588 | __hv32 tm_sec; /**< Seconds, 0-59 */ | ||
589 | __hv32 tm_min; /**< Minutes, 0-59 */ | ||
590 | __hv32 tm_hour; /**< Hours, 0-23 */ | ||
591 | __hv32 tm_mday; /**< Day of month, 0-30 */ | ||
592 | __hv32 tm_mon; /**< Month, 0-11 */ | ||
593 | __hv32 tm_year; /**< Years since 1900, 0-199 */ | ||
594 | __hv32 flags; /**< Return flags, 0 if no error */ | ||
595 | #endif | ||
596 | } HV_RTCTime; | ||
597 | |||
598 | /** Read the current time-of-day clock. | ||
599 | * @return HV_RTCTime of current time (GMT). | ||
600 | */ | ||
601 | HV_RTCTime hv_get_rtc(void); | ||
602 | |||
603 | |||
604 | /** Set the current time-of-day clock. | ||
605 | * @param time time to reset time-of-day to (GMT). | ||
606 | */ | ||
607 | void hv_set_rtc(HV_RTCTime time); | ||
608 | |||
609 | /** Installs a context, comprising a page table and other attributes. | ||
610 | * | ||
611 | * Once this service completes, page_table will be used to translate | ||
612 | * subsequent virtual address references to physical memory. | ||
613 | * | ||
614 | * Installing a context does not cause an implicit TLB flush. Before | ||
615 | * reusing an ASID value for a different address space, the client is | ||
616 | * expected to flush old references from the TLB with hv_flush_asid(). | ||
617 | * (Alternately, hv_flush_all() may be used to flush many ASIDs at once.) | ||
618 | * After invalidating a page table entry, changing its attributes, or | ||
619 | * changing its target CPA, the client is expected to flush old references | ||
620 | * from the TLB with hv_flush_page() or hv_flush_pages(). Making a | ||
621 | * previously invalid page valid does not require a flush. | ||
622 | * | ||
623 | * Specifying an invalid ASID, or an invalid CPA (client physical address) | ||
624 | * (either as page_table, or within the referenced table), | ||
625 | * or another page table data item documented above as illegal may | ||
626 | * lead to client termination; since the validation of the table is | ||
627 | * done as needed, this may happen before the service returns, or at | ||
628 | * some later time, or never, depending upon the client's pattern of | ||
629 | * memory references. Page table entries which supply translations for | ||
630 | * invalid virtual addresses may result in client termination, or may | ||
631 | * be silently ignored. "Invalid" in this context means a value which | ||
632 | * was not provided to the client via the appropriate hv_inquire_* routine. | ||
633 | * | ||
634 | * To support changing the instruction VAs at the same time as | ||
635 | * installing the new page table, this call explicitly supports | ||
636 | * setting the "lr" register to a different address and then jumping | ||
637 | * directly to the hv_install_context() routine. In this case, the | ||
638 | * new page table does not need to contain any mapping for the | ||
639 | * hv_install_context address itself. | ||
640 | * | ||
641 | * @param page_table Root of the page table. | ||
642 | * @param access PTE providing info on how to read the page table. This | ||
643 | * value must be consistent between multiple tiles sharing a page table, | ||
644 | * and must also be consistent with any virtual mappings the client | ||
645 | * may be using to access the page table. | ||
646 | * @param asid HV_ASID the page table is to be used for. | ||
647 | * @param flags Context flags, denoting attributes or privileges of the | ||
648 | * current context (HV_CTX_xxx). | ||
649 | * @return Zero on success, or a hypervisor error code on failure. | ||
650 | */ | ||
651 | int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid, | ||
652 | __hv32 flags); | ||
653 | |||
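The following sketch (editorial, not part of the header; it assumes this header has been included in full, and the page table contents are assumed to be set up already) shows the call sequence implied by the flushing rules above when an ASID is re-used for a different address space:

/* Hedged sketch: install a new page table under a previously used ASID. */
static int switch_address_space(HV_PhysAddr page_table, HV_PTE access,
                                HV_ASID asid)
{
    /* Drop stale translations for this ASID before re-using it. */
    int rc = hv_flush_asid(asid);
    if (rc != 0)
        return rc;

    /* No HV_CTX_xxx privileges requested in this example. */
    return hv_install_context(page_table, access, asid, 0);
}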
654 | #endif /* !__ASSEMBLER__ */ | ||
655 | |||
656 | #define HV_CTX_DIRECTIO 0x1 /**< Direct I/O requests are accepted from | ||
657 | PL0. */ | ||
658 | |||
659 | #ifndef __ASSEMBLER__ | ||
660 | |||
661 | /** Value returned from hv_inquire_context(). */ | ||
662 | typedef struct | ||
663 | { | ||
664 | /** Physical address of page table */ | ||
665 | HV_PhysAddr page_table; | ||
666 | |||
667 | /** PTE which defines access method for top of page table */ | ||
668 | HV_PTE access; | ||
669 | |||
670 | /** ASID associated with this page table */ | ||
671 | HV_ASID asid; | ||
672 | |||
673 | /** Context flags */ | ||
674 | __hv32 flags; | ||
675 | } HV_Context; | ||
676 | |||
677 | /** Retrieve information about the currently installed context. | ||
678 | * @return The data passed to the last successful hv_install_context call. | ||
679 | */ | ||
680 | HV_Context hv_inquire_context(void); | ||
681 | |||
682 | |||
683 | /** Flushes all translations associated with the named address space | ||
684 | * identifier from the TLB and any other hypervisor data structures. | ||
685 | * Translations installed with the "global" bit are not flushed. | ||
686 | * | ||
687 | * Specifying an invalid ASID may lead to client termination. "Invalid" | ||
688 | * in this context means a value which was not provided to the client | ||
689 | * via <tt>hv_inquire_asid()</tt>. | ||
690 | * | ||
691 | * @param asid HV_ASID whose entries are to be flushed. | ||
692 | * @return Zero on success, or a hypervisor error code on failure. | ||
693 | */ | ||
694 | int hv_flush_asid(HV_ASID asid); | ||
695 | |||
696 | |||
697 | /** Flushes all translations associated with the named virtual address | ||
698 | * and page size from the TLB and other hypervisor data structures. Only | ||
699 | * pages visible to the current ASID are affected; note that this includes | ||
700 | * global pages in addition to pages specific to the current ASID. | ||
701 | * | ||
702 | * The supplied VA need not be aligned; it may be anywhere in the | ||
703 | * subject page. | ||
704 | * | ||
705 | * Specifying an invalid virtual address may lead to client termination, | ||
706 | * or may silently succeed. "Invalid" in this context means a value | ||
707 | * which was not provided to the client via hv_inquire_virtual. | ||
708 | * | ||
709 | * @param address Address of the page to flush. | ||
710 | * @param page_size Size of pages to assume. | ||
711 | * @return Zero on success, or a hypervisor error code on failure. | ||
712 | */ | ||
713 | int hv_flush_page(HV_VirtAddr address, HV_PageSize page_size); | ||
714 | |||
715 | |||
716 | /** Flushes all translations associated with the named virtual address range | ||
717 | * and page size from the TLB and other hypervisor data structures. Only | ||
718 | * pages visible to the current ASID are affected; note that this includes | ||
719 | * global pages in addition to pages specific to the current ASID. | ||
720 | * | ||
721 | * The supplied VA need not be aligned; it may be anywhere in the | ||
722 | * subject page. | ||
723 | * | ||
724 | * Specifying an invalid virtual address may lead to client termination, | ||
725 | * or may silently succeed. "Invalid" in this context means a value | ||
726 | * which was not provided to the client via hv_inquire_virtual. | ||
727 | * | ||
728 | * @param start Address to flush. | ||
729 | * @param page_size Size of pages to assume. | ||
730 | * @param size The number of bytes to flush. Any page in the range | ||
731 | * [start, start + size) will be flushed from the TLB. | ||
732 | * @return Zero on success, or a hypervisor error code on failure. | ||
733 | */ | ||
734 | int hv_flush_pages(HV_VirtAddr start, HV_PageSize page_size, | ||
735 | unsigned long size); | ||
736 | |||
737 | |||
738 | /** Flushes all non-global translations (if preserve_global is true), | ||
739 | * or absolutely all translations (if preserve_global is false). | ||
740 | * | ||
741 | * @param preserve_global Non-zero if we want to preserve "global" mappings. | ||
742 | * @return Zero on success, or a hypervisor error code on failure. | ||
743 | */ | ||
744 | int hv_flush_all(int preserve_global); | ||
745 | |||
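A small sketch of the flush call patterns implied above (editorial; assumes this header has been included in full, and the function names are illustrative):

/* Hedged sketch: flush one page after its PTE was invalidated or changed,
 * and flush everything except global mappings when tearing down a space. */
static int flush_one_page(HV_VirtAddr va, HV_PageSize pgsize)
{
    /* va may point anywhere within the page being flushed. */
    return hv_flush_page(va, pgsize);
}

static int flush_non_global(void)
{
    return hv_flush_all(1 /* preserve_global */);
}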
746 | |||
747 | /** Restart machine with optional restart command and optional args. | ||
748 | * @param cmd Const pointer to command to restart with, or NULL | ||
749 | * @param args Const pointer to argument string to restart with, or NULL | ||
750 | */ | ||
751 | void hv_restart(HV_VirtAddr cmd, HV_VirtAddr args); | ||
752 | |||
753 | |||
754 | /** Halt machine. */ | ||
755 | void hv_halt(void); | ||
756 | |||
757 | |||
758 | /** Power off machine. */ | ||
759 | void hv_power_off(void); | ||
760 | |||
761 | |||
762 | /** Re-enter virtual-is-physical memory translation mode and restart | ||
763 | * execution at a given address. | ||
764 | * @param entry Client physical address at which to begin execution. | ||
765 | * @return A hypervisor error code on failure; if the operation is | ||
766 | * successful the call does not return. | ||
767 | */ | ||
768 | int hv_reexec(HV_PhysAddr entry); | ||
769 | |||
770 | |||
771 | /** Chip topology */ | ||
772 | typedef struct | ||
773 | { | ||
774 | /** Relative coordinates of the querying tile */ | ||
775 | HV_Coord coord; | ||
776 | |||
777 | /** Width of the querying supervisor's tile rectangle. */ | ||
778 | int width; | ||
779 | |||
780 | /** Height of the querying supervisor's tile rectangle. */ | ||
781 | int height; | ||
782 | |||
783 | } HV_Topology; | ||
784 | |||
785 | /** Returns information about the tile coordinate system. | ||
786 | * | ||
787 | * Each supervisor is given a rectangle of tiles it potentially controls. | ||
788 | * These tiles are labeled using a relative coordinate system with (0,0) as | ||
789 | * the upper left tile regardless of their physical location on the chip. | ||
790 | * | ||
791 | * This call returns both the size of that rectangle and the position | ||
792 | * within that rectangle of the querying tile. | ||
793 | * | ||
794 | * Not all tiles within that rectangle may be available to the supervisor; | ||
795 | * to get the precise set of available tiles, you must also call | ||
796 | * hv_inquire_tiles(HV_INQ_TILES_AVAIL, ...). | ||
797 | **/ | ||
798 | HV_Topology hv_inquire_topology(void); | ||
799 | |||
800 | /** Sets of tiles we can retrieve with hv_inquire_tiles(). | ||
801 | * | ||
802 | * These numbers are part of the binary API and guaranteed not to change. | ||
803 | */ | ||
804 | typedef enum { | ||
805 | /** An invalid value; do not use. */ | ||
806 | _HV_INQ_TILES_RESERVED = 0, | ||
807 | |||
808 | /** All available tiles within the supervisor's tile rectangle. */ | ||
809 | HV_INQ_TILES_AVAIL = 1, | ||
810 | |||
811 | /** The set of tiles used for hash-for-home caching. */ | ||
812 | HV_INQ_TILES_HFH_CACHE = 2, | ||
813 | |||
814 | /** The set of tiles that can be legally used as a LOTAR for a PTE. */ | ||
815 | HV_INQ_TILES_LOTAR = 3 | ||
816 | } HV_InqTileSet; | ||
817 | |||
818 | /** Returns specific information about various sets of tiles within the | ||
819 | * supervisor's tile rectangle. | ||
820 | * | ||
821 | * @param set Which set of tiles to retrieve. | ||
822 | * @param cpumask Pointer to a returned bitmask (in row-major order, | ||
823 | * supervisor-relative) of tiles. The low bit of the first word | ||
824 | * corresponds to the tile at the upper left-hand corner of the | ||
825 | * supervisor's rectangle. In order for the supervisor to know the | ||
826 | * buffer length to supply, it should first call hv_inquire_topology. | ||
827 | * @param length Number of bytes available for the returned bitmask. | ||
828 | **/ | ||
829 | HV_Errno hv_inquire_tiles(HV_InqTileSet set, HV_VirtAddr cpumask, int length); | ||
830 | |||
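As the comment above suggests, the cpumask buffer should be sized from the topology first. A hedged sketch (editorial; the word-array buffer handling is an assumption, not an interface requirement):

/* Hedged sketch: fetch the set of available tiles. */
static HV_Errno get_available_tiles(unsigned long *mask, int mask_bytes)
{
    HV_Topology topo = hv_inquire_topology();
    int needed = (topo.width * topo.height + 7) / 8;   /* one bit per tile */

    if (needed > mask_bytes)
        return HV_EINVAL;   /* caller's buffer is too small */

    return hv_inquire_tiles(HV_INQ_TILES_AVAIL,
                            (HV_VirtAddr)(unsigned long)mask, mask_bytes);
}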
831 | |||
832 | /** An identifier for a memory controller. Multiple memory controllers | ||
833 | * may be connected to one chip, and this uniquely identifies each one. | ||
834 | */ | ||
835 | typedef int HV_MemoryController; | ||
836 | |||
837 | /** A range of physical memory. */ | ||
838 | typedef struct | ||
839 | { | ||
840 | HV_PhysAddr start; /**< Starting address. */ | ||
841 | __hv64 size; /**< Size in bytes. */ | ||
842 | HV_MemoryController controller; /**< Which memory controller owns this. */ | ||
843 | } HV_PhysAddrRange; | ||
844 | |||
845 | /** Returns information about a range of physical memory. | ||
846 | * | ||
847 | * hv_inquire_physical() returns one of the ranges of client | ||
848 | * physical addresses which are available to this client. | ||
849 | * | ||
850 | * The first range is retrieved by specifying an idx of 0, and | ||
851 | * successive ranges are returned with subsequent idx values. Ranges | ||
852 | * are ordered by increasing start address (i.e., as idx increases, | ||
853 | * so does start), do not overlap, and do not touch (i.e., the | ||
854 | * available memory is described with the fewest possible ranges). | ||
855 | * | ||
856 | * If an out-of-range idx value is specified, the returned size will be zero. | ||
857 | * A client can count the number of ranges by increasing idx until the | ||
858 | * returned size is zero. There will always be at least one valid range. | ||
859 | * | ||
860 | * Some clients might not be prepared to deal with more than one | ||
861 | * physical address range; they still ought to call this routine and | ||
862 | * issue a warning message if they're given more than one range, on the | ||
863 | * theory that whoever configured the hypervisor to provide that memory | ||
864 | * should know that it's being wasted. | ||
865 | */ | ||
866 | HV_PhysAddrRange hv_inquire_physical(int idx); | ||
867 | |||
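The idx-until-zero-size convention above lends itself to a simple enumeration loop; a hedged sketch (editorial; summing the sizes is just an example use):

/* Hedged sketch: walk all client physical ranges and sum their sizes. */
static __hv64 total_client_memory(void)
{
    __hv64 total = 0;
    int idx;

    for (idx = 0; ; idx++) {
        HV_PhysAddrRange r = hv_inquire_physical(idx);
        if (r.size == 0)
            break;              /* out-of-range idx: no more ranges */
        total += r.size;
    }
    return total;
}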
868 | |||
869 | /** Memory controller information. */ | ||
870 | typedef struct | ||
871 | { | ||
872 | HV_Coord coord; /**< Relative tile coordinates of the port used by a | ||
873 | specified tile to communicate with this controller. */ | ||
874 | __hv64 speed; /**< Speed of this controller in bytes per second. */ | ||
875 | } HV_MemoryControllerInfo; | ||
876 | |||
877 | /** Returns information about a particular memory controller. | ||
878 | * | ||
879 | * hv_inquire_memory_controller(coord,idx) returns information about a | ||
880 | * particular controller. Two pieces of information are returned: | ||
881 | * - The relative coordinates of the port on the controller that the specified | ||
882 | * tile would use to contact it. The relative coordinates may lie | ||
883 | * outside the supervisor's rectangle, i.e. the controller may not | ||
884 | * be attached to a node managed by the querying node's supervisor. | ||
885 | * In particular note that x or y may be negative. | ||
886 | * - The speed of the memory controller. (This is a not-to-exceed value | ||
887 | * based on the raw hardware data rate, and may not be achievable in | ||
888 | * practice; it is provided to give clients information on the relative | ||
889 | * performance of the available controllers.) | ||
890 | * | ||
891 | * Clients should avoid calling this interface with invalid values. | ||
892 | * A client that does so may be terminated. | ||
893 | * @param coord Tile for which to calculate the relative port position. | ||
894 | * @param controller Index of the controller; identical to value returned | ||
895 | * from other routines like hv_inquire_physical. | ||
896 | * @return Information about the controller. | ||
897 | */ | ||
898 | HV_MemoryControllerInfo hv_inquire_memory_controller(HV_Coord coord, | ||
899 | int controller); | ||
900 | |||
901 | |||
902 | /** A range of virtual memory. */ | ||
903 | typedef struct | ||
904 | { | ||
905 | HV_VirtAddr start; /**< Starting address. */ | ||
906 | __hv64 size; /**< Size in bytes. */ | ||
907 | } HV_VirtAddrRange; | ||
908 | |||
909 | /** Returns information about a range of virtual memory. | ||
910 | * | ||
911 | * hv_inquire_virtual() returns one of the ranges of client | ||
912 | * virtual addresses which are available to this client. | ||
913 | * | ||
914 | * The first range is retrieved by specifying an idx of 0, and | ||
915 | * successive ranges are returned with subsequent idx values. Ranges | ||
916 | * are ordered by increasing start address (i.e., as idx increases, | ||
917 | * so does start), do not overlap, and do not touch (i.e., the | ||
918 | * available memory is described with the fewest possible ranges). | ||
919 | * | ||
920 | * If an out-of-range idx value is specified, the returned size will be zero. | ||
921 | * A client can count the number of ranges by increasing idx until the | ||
922 | * returned size is zero. There will always be at least one valid range. | ||
923 | * | ||
924 | * Some clients may well have various virtual addresses hardwired | ||
925 | * into themselves; for instance, their instruction stream may | ||
926 | * have been compiled expecting to live at a particular address. | ||
927 | * Such clients should use this interface to verify they've been | ||
928 | * given the virtual address space they expect, and issue a (potentially | ||
929 | * fatal) warning message otherwise. | ||
930 | * | ||
931 | * Note that the returned size is a __hv64, not a __hv32, so it is | ||
932 | * possible to express a single range spanning the entire 32-bit | ||
933 | * address space. | ||
934 | */ | ||
935 | HV_VirtAddrRange hv_inquire_virtual(int idx); | ||
936 | |||
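A hedged sketch of the verification the comment above recommends for clients with hard-wired addresses (editorial; the expected base and size are stand-ins for whatever the client was linked against):

/* Hedged sketch: check that [base, base + size) lies inside some
 * hypervisor-provided virtual range. */
static int virtual_range_ok(HV_VirtAddr base, __hv64 size)
{
    int idx;

    for (idx = 0; ; idx++) {
        HV_VirtAddrRange r = hv_inquire_virtual(idx);
        if (r.size == 0)
            return 0;           /* ranges exhausted without a match */
        if (base >= r.start && base + size <= r.start + r.size)
            return 1;           /* fully contained in this range */
    }
}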
937 | |||
938 | /** A range of ASID values. */ | ||
939 | typedef struct | ||
940 | { | ||
941 | HV_ASID start; /**< First ASID in the range. */ | ||
942 | unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */ | ||
943 | } HV_ASIDRange; | ||
944 | |||
945 | /** Returns information about a range of ASIDs. | ||
946 | * | ||
947 | * hv_inquire_asid() returns one of the ranges of address | ||
948 | * space identifiers which are available to this client. | ||
949 | * | ||
950 | * The first range is retrieved by specifying an idx of 0, and | ||
951 | * successive ranges are returned with subsequent idx values. Ranges | ||
952 | * are ordered by increasing start value (i.e., as idx increases, | ||
953 | * so does start), do not overlap, and do not touch (i.e., the | ||
954 | * available ASIDs are described with the fewest possible ranges). | ||
955 | * | ||
956 | * If an out-of-range idx value is specified, the returned size will be zero. | ||
957 | * A client can count the number of ranges by increasing idx until the | ||
958 | * returned size is zero. There will always be at least one valid range. | ||
959 | */ | ||
960 | HV_ASIDRange hv_inquire_asid(int idx); | ||
961 | |||
962 | |||
963 | /** Waits for at least the specified number of nanoseconds then returns. | ||
964 | * | ||
965 | * @param nanosecs The number of nanoseconds to sleep. | ||
966 | */ | ||
967 | void hv_nanosleep(int nanosecs); | ||
968 | |||
969 | |||
970 | /** Reads a character from the console without blocking. | ||
971 | * | ||
972 | * @return A value from 0-255 indicates the value successfully read. | ||
973 | * A negative value means no value was ready. | ||
974 | */ | ||
975 | int hv_console_read_if_ready(void); | ||
976 | |||
977 | |||
978 | /** Writes a character to the console, blocking if the console is busy. | ||
979 | * | ||
980 | * This call cannot fail. If the console is broken for some reason, | ||
981 | * output will simply vanish. | ||
982 | * @param byte Character to write. | ||
983 | */ | ||
984 | void hv_console_putc(int byte); | ||
985 | |||
986 | |||
987 | /** Writes a string to the console, blocking if the console is busy. | ||
988 | * @param bytes Pointer to characters to write. | ||
989 | * @param len Number of characters to write. | ||
990 | * @return Number of characters written, or HV_EFAULT if the buffer is invalid. | ||
991 | */ | ||
992 | int hv_console_write(HV_VirtAddr bytes, int len); | ||
993 | |||
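A hedged console-output sketch (editorial; no libc is assumed, and negative return values are assumed to indicate errors as elsewhere in this interface):

/* Hedged sketch: write a NUL-terminated string to the console. */
static void console_puts(const char *s)
{
    int len = 0, i;

    while (s[len] != '\0')
        len++;

    if (hv_console_write((HV_VirtAddr)(unsigned long)s, len) < 0) {
        for (i = 0; i < len; i++)
            hv_console_putc(s[i]);  /* fallback; putc cannot fail */
    }
}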
994 | |||
995 | /** Dispatch the next interrupt from the client downcall mechanism. | ||
996 | * | ||
997 | * The hypervisor uses downcalls to notify the client of asynchronous | ||
998 | * events. Some of these events are hypervisor-created (like incoming | ||
999 | * messages). Some are regular interrupts which initially occur in | ||
1000 | * the hypervisor, and are normally handled directly by the client; | ||
1001 | * when these occur in a client's interrupt critical section, they must | ||
1002 | * be delivered through the downcall mechanism. | ||
1003 | * | ||
1004 | * A downcall is initially delivered to the client as an INTCTRL_1 | ||
1005 | * interrupt. Upon entry to the INTCTRL_1 vector, the client must | ||
1006 | * immediately invoke the hv_downcall_dispatch service. This service | ||
1007 | * will not return; instead it will cause one of the client's actual | ||
1008 | * downcall-handling interrupt vectors to be entered. The EX_CONTEXT | ||
1009 | * registers in the client will be set so that when the client irets, | ||
1010 | * it will return to the code which was interrupted by the INTCTRL_1 | ||
1011 | * interrupt. | ||
1012 | * | ||
1013 | * Any saving of registers should be done by the actual handling | ||
1014 | * vectors; no registers should be changed by the INTCTRL_1 handler. | ||
1015 | * In particular, the client should not use a jal instruction to invoke | ||
1016 | * the hv_downcall_dispatch service, as that would overwrite the client's | ||
1017 | * lr register. Note that the hv_downcall_dispatch service may overwrite | ||
1018 | * one or more of the client's system save registers. | ||
1019 | * | ||
1020 | * The client must not modify the INTCTRL_1_STATUS SPR. The hypervisor | ||
1021 | * will set this register to cause a downcall to happen, and will clear | ||
1022 | * it when no further downcalls are pending. | ||
1023 | * | ||
1024 | * When a downcall vector is entered, the INTCTRL_1 interrupt will be | ||
1025 | * masked. When the client is done processing a downcall, and is ready | ||
1026 | * to accept another, it must unmask this interrupt; if more downcalls | ||
1027 | * are pending, this will cause the INTCTRL_1 vector to be reentered. | ||
1028 | * Currently the following interrupt vectors can be entered through a | ||
1029 | * downcall: | ||
1030 | * | ||
1031 | * INT_MESSAGE_RCV_DWNCL (hypervisor message available) | ||
1032 | * INT_DMATLB_MISS_DWNCL (DMA TLB miss) | ||
1033 | * INT_SNITLB_MISS_DWNCL (SNI TLB miss) | ||
1034 | * INT_DMATLB_ACCESS_DWNCL (DMA TLB access violation) | ||
1035 | */ | ||
1036 | void hv_downcall_dispatch(void); | ||
1037 | |||
1038 | #endif /* !__ASSEMBLER__ */ | ||
1039 | |||
1040 | /** We use actual interrupt vectors which never occur (they're only there | ||
1041 | * to allow setting MPLs for related SPRs) for our downcall vectors. | ||
1042 | */ | ||
1043 | /** Message receive downcall interrupt vector */ | ||
1044 | #define INT_MESSAGE_RCV_DWNCL INT_BOOT_ACCESS | ||
1045 | /** DMA TLB miss downcall interrupt vector */ | ||
1046 | #define INT_DMATLB_MISS_DWNCL INT_DMA_ASID | ||
1047 | /** Static network processor instruction TLB miss downcall interrupt vector */ | ||
1048 | #define INT_SNITLB_MISS_DWNCL INT_SNI_ASID | ||
1049 | /** DMA TLB access violation downcall interrupt vector */ | ||
1050 | #define INT_DMATLB_ACCESS_DWNCL INT_DMA_CPL | ||
1051 | /** Device interrupt downcall interrupt vector */ | ||
1052 | #define INT_DEV_INTR_DWNCL INT_WORLD_ACCESS | ||
1053 | |||
1054 | #ifndef __ASSEMBLER__ | ||
1055 | |||
1056 | /** Requests the inode for a specific full pathname. | ||
1057 | * | ||
1058 | * Performs a lookup in the hypervisor filesystem for a given filename. | ||
1059 | * Multiple calls with the same filename will always return the same inode. | ||
1060 | * If there is no such filename, HV_ENOENT is returned. | ||
1061 | * A bad filename pointer may result in HV_EFAULT instead. | ||
1062 | * | ||
1063 | * @param filename Constant pointer to name of requested file | ||
1064 | * @return Inode of requested file | ||
1065 | */ | ||
1066 | int hv_fs_findfile(HV_VirtAddr filename); | ||
1067 | |||
1068 | |||
1069 | /** Data returned from an fstat request. | ||
1070 | * Note that this structure should be no more than 40 bytes in size so | ||
1071 | * that it can always be returned completely in registers. | ||
1072 | */ | ||
1073 | typedef struct | ||
1074 | { | ||
1075 | int size; /**< Size of file (or HV_Errno on error) */ | ||
1076 | unsigned int flags; /**< Flags (see HV_FS_FSTAT_FLAGS) */ | ||
1077 | } HV_FS_StatInfo; | ||
1078 | |||
1079 | /** Bitmask flags for fstat request */ | ||
1080 | typedef enum | ||
1081 | { | ||
1082 | HV_FS_ISDIR = 0x0001 /**< Is the entry a directory? */ | ||
1083 | } HV_FS_FSTAT_FLAGS; | ||
1084 | |||
1085 | /** Get stat information on a given file inode. | ||
1086 | * | ||
1087 | * Return information on the file with the given inode. | ||
1088 | * | ||
1089 | * If the HV_FS_ISDIR bit is set, the "file" is a directory. Reading | ||
1090 | * it will return NUL-separated filenames (no directory part) relative | ||
1091 | * to the path to the inode of the directory "file". These can be | ||
1092 | * appended to the path to the directory "file" after a forward slash | ||
1093 | * to create additional filenames. Note that it is not required | ||
1094 | * that all valid paths be decomposable into valid parent directories; | ||
1095 | * a filesystem may validly have just a few files, none of which have | ||
1096 | * HV_FS_ISDIR set. However, if clients wish to enumerate the | ||
1097 | * files in the filesystem, it is recommended to include all the | ||
1098 | * appropriate parent directory "files" to give a consistent view. | ||
1099 | * | ||
1100 | * An invalid file inode will cause an HV_EBADF error to be returned. | ||
1101 | * | ||
1102 | * @param inode The inode number of the query | ||
1103 | * @return An HV_FS_StatInfo structure | ||
1104 | */ | ||
1105 | HV_FS_StatInfo hv_fs_fstat(int inode); | ||
1106 | |||
1107 | |||
1108 | /** Read data from a specific hypervisor file. | ||
1109 | * On error, may return HV_EBADF for a bad inode or HV_EFAULT for a bad buf. | ||
1110 | * Reads near the end of the file will return fewer bytes than requested. | ||
1111 | * Reads at or beyond the end of a file will return zero. | ||
1112 | * | ||
1113 | * @param inode the hypervisor file to read | ||
1114 | * @param buf the buffer to read data into | ||
1115 | * @param length the number of bytes of data to read | ||
1116 | * @param offset the offset into the file to read the data from | ||
1117 | * @return number of bytes successfully read, or an HV_Errno code | ||
1118 | */ | ||
1119 | int hv_fs_pread(int inode, HV_VirtAddr buf, int length, int offset); | ||
1120 | |||
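Putting the three filesystem services together, a hedged sketch of reading a whole hypervisor file into a buffer (editorial; the chunked loop and error handling are assumptions about typical use, not requirements):

/* Hedged sketch: read an entire hypervisor file. */
static int read_hv_file(HV_VirtAddr name, HV_VirtAddr buf, int buflen)
{
    int inode = hv_fs_findfile(name);
    HV_FS_StatInfo st;
    int done = 0;

    if (inode < 0)
        return inode;           /* HV_ENOENT or HV_EFAULT */

    st = hv_fs_fstat(inode);
    if (st.size < 0)
        return st.size;         /* HV_Errno from the fstat request */

    while (done < st.size && done < buflen) {
        int got = hv_fs_pread(inode, buf + done, buflen - done, done);
        if (got < 0)
            return got;
        if (got == 0)
            break;              /* at or beyond end of file */
        done += got;
    }
    return done;
}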
1121 | |||
1122 | /** Read a 64-bit word from the specified physical address. | ||
1123 | * The address must be 8-byte aligned. | ||
1124 | * Specifying an invalid physical address will lead to client termination. | ||
1125 | * @param addr The physical address to read | ||
1126 | * @param access The PTE describing how to read the memory | ||
1127 | * @return The 64-bit value read from the given address | ||
1128 | */ | ||
1129 | unsigned long long hv_physaddr_read64(HV_PhysAddr addr, HV_PTE access); | ||
1130 | |||
1131 | |||
1132 | /** Write a 64-bit word to the specified physical address. | ||
1133 | * The address must be 8-byte aligned. | ||
1134 | * Specifying an invalid physical address will lead to client termination. | ||
1135 | * @param addr The physical address to write | ||
1136 | * @param access The PTE that says how to write the memory | ||
1137 | * @param val The 64-bit value to write to the given address | ||
1138 | */ | ||
1139 | void hv_physaddr_write64(HV_PhysAddr addr, HV_PTE access, | ||
1140 | unsigned long long val); | ||
1141 | |||
1142 | |||
1143 | /** Get the value of the command-line for the supervisor, if any. | ||
1144 | * This will not include the filename of the booted supervisor, but may | ||
1145 | * include configured-in boot arguments or the hv_restart() arguments. | ||
1146 | * If the buffer is not long enough, the hypervisor will write a NUL to the | ||
1147 | * first character of the buffer but will not write any other data. | ||
1148 | * @param buf The virtual address to write the command-line string to. | ||
1149 | * @param length The length of buf, in characters. | ||
1150 | * @return The actual length of the command line, including the trailing NUL | ||
1151 | * (may be larger than "length"). | ||
1152 | */ | ||
1153 | int hv_get_command_line(HV_VirtAddr buf, int length); | ||
1154 | |||
1155 | |||
1156 | /** Set a new value for the command-line for the supervisor, which will | ||
1157 | * be returned from subsequent invocations of hv_get_command_line() on | ||
1158 | * this tile. | ||
1159 | * @param buf The virtual address to read the command-line string from. | ||
1160 | * @param length The length of buf, in characters; must be no more than | ||
1161 | * HV_COMMAND_LINE_LEN. | ||
1162 | * @return Zero if successful, or a hypervisor error code. | ||
1163 | */ | ||
1164 | HV_Errno hv_set_command_line(HV_VirtAddr buf, int length); | ||
1165 | |||
1166 | /** Maximum size of a command line passed to hv_set_command_line(); note | ||
1167 | * that a line returned from hv_get_command_line() could be larger than | ||
1168 | * this.*/ | ||
1169 | #define HV_COMMAND_LINE_LEN 256 | ||
1170 | |||
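A hedged sketch of fetching the command line with a buffer sized by HV_COMMAND_LINE_LEN (editorial; a longer line is possible in principle and is simply reported as truncated here):

/* Hedged sketch: copy the supervisor command line into buf. */
static int fetch_command_line(char *buf /* >= HV_COMMAND_LINE_LEN bytes */)
{
    int needed = hv_get_command_line((HV_VirtAddr)(unsigned long)buf,
                                     HV_COMMAND_LINE_LEN);

    if (needed > HV_COMMAND_LINE_LEN)
        return -1;      /* too long: only buf[0] was set (to NUL) */
    return needed;      /* length including the trailing NUL */
}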
1171 | /** Tell the hypervisor how to cache non-priority pages | ||
1172 | * (its own as well as pages explicitly represented in page tables). | ||
1173 | * Normally these will be represented as red/black pages, but | ||
1174 | * when the supervisor starts to allocate "priority" pages in the PTE | ||
1175 | * the hypervisor will need to start marking those pages as (e.g.) "red" | ||
1176 | * and non-priority pages as either "black" (if they cache-alias | ||
1177 | * with the existing priority pages) or "red/black" (if they don't). | ||
1178 | * The bitmask provides information on which parts of the cache | ||
1179 | * have been used for pinned pages so far on this tile; if (1 << N) | ||
1180 | * appears in the bitmask, that indicates that a page has been marked | ||
1181 | * "priority" whose PFN equals N, mod 8. | ||
1182 | * @param bitmask A bitmap of priority page set values | ||
1183 | */ | ||
1184 | void hv_set_caching(unsigned int bitmask); | ||
1185 | |||
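A hedged sketch of how a supervisor might maintain the bitmask described above as it pins priority pages (editorial; the accumulator variable and helper are illustrative):

/* Hedged sketch: remember which (PFN mod 8) "colors" hold priority pages. */
static unsigned int priority_color_mask;

static void note_priority_page(unsigned long pfn)
{
    priority_color_mask |= 1U << (pfn & 7);     /* bit N <=> PFN == N mod 8 */
    hv_set_caching(priority_color_mask);
}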
1186 | |||
1187 | /** Zero out a specified number of pages. | ||
1188 | * The va and size must both be multiples of 4096. | ||
1189 | * Caches are bypassed and memory is directly set to zero. | ||
1190 | * This API is implemented only in the magic hypervisor and is intended | ||
1191 | * to provide a performance boost to the minimal supervisor by | ||
1192 | * giving it a fast way to zero memory pages when allocating them. | ||
1193 | * @param va Virtual address where the page has been mapped | ||
1194 | * @param size Number of bytes (must be a page size multiple) | ||
1195 | */ | ||
1196 | void hv_bzero_page(HV_VirtAddr va, unsigned int size); | ||
1197 | |||
1198 | |||
1199 | /** State object for the hypervisor messaging subsystem. */ | ||
1200 | typedef struct | ||
1201 | { | ||
1202 | #if CHIP_VA_WIDTH() > 32 | ||
1203 | __hv64 opaque[2]; /**< No user-serviceable parts inside */ | ||
1204 | #else | ||
1205 | __hv32 opaque[2]; /**< No user-serviceable parts inside */ | ||
1206 | #endif | ||
1207 | } | ||
1208 | HV_MsgState; | ||
1209 | |||
1210 | /** Register to receive incoming messages. | ||
1211 | * | ||
1212 | * This routine configures the current tile so that it can receive | ||
1213 | * incoming messages. It must be called before the client can receive | ||
1214 | * messages with the hv_receive_message routine, and must be called on | ||
1215 | * each tile which will receive messages. | ||
1216 | * | ||
1217 | * msgstate is the virtual address of a state object of type HV_MsgState. | ||
1218 | * Once the state is registered, the client must not read or write the | ||
1219 | * state object; doing so will cause undefined results. | ||
1220 | * | ||
1221 | * If this routine is called with msgstate set to 0, the client's message | ||
1222 | * state will be freed and it will no longer be able to receive messages. | ||
1223 | * Note that this may cause the loss of any as-yet-undelivered messages | ||
1224 | * for the client. | ||
1225 | * | ||
1226 | * If another client attempts to send a message to a client which has | ||
1227 | * not yet called hv_register_message_state, or which has freed its | ||
1228 | * message state, the message will not be delivered, as if the client | ||
1229 | * had insufficient buffering. | ||
1230 | * | ||
1231 | * This routine returns HV_OK if the registration was successful, and | ||
1232 | * HV_EINVAL if the supplied state object is unsuitable. Note that some | ||
1233 | * errors may not be detected during this routine, but might be detected | ||
1234 | * during a subsequent message delivery. | ||
1235 | * @param msgstate State object. | ||
1236 | **/ | ||
1237 | HV_Errno hv_register_message_state(HV_MsgState* msgstate); | ||
1238 | |||
1239 | /** Possible message recipient states. */ | ||
1240 | typedef enum | ||
1241 | { | ||
1242 | HV_TO_BE_SENT, /**< Not sent (not attempted, or recipient not ready) */ | ||
1243 | HV_SENT, /**< Successfully sent */ | ||
1244 | HV_BAD_RECIP /**< Bad recipient coordinates (permanent error) */ | ||
1245 | } HV_Recip_State; | ||
1246 | |||
1247 | /** Message recipient. */ | ||
1248 | typedef struct | ||
1249 | { | ||
1250 | /** X coordinate, relative to supervisor's top-left coordinate */ | ||
1251 | unsigned int x:11; | ||
1252 | |||
1253 | /** Y coordinate, relative to supervisor's top-left coordinate */ | ||
1254 | unsigned int y:11; | ||
1255 | |||
1256 | /** Status of this recipient */ | ||
1257 | HV_Recip_State state:10; | ||
1258 | } HV_Recipient; | ||
1259 | |||
1260 | /** Send a message to a set of recipients. | ||
1261 | * | ||
1262 | * This routine sends a message to a set of recipients. | ||
1263 | * | ||
1264 | * recips is an array of HV_Recipient structures. Each specifies a tile, | ||
1265 | * and a message state; initially, it is expected that the state will | ||
1266 | * be set to HV_TO_BE_SENT. nrecip specifies the number of recipients | ||
1267 | * in the recips array. | ||
1268 | * | ||
1269 | * For each recipient whose state is HV_TO_BE_SENT, the hypervisor attempts | ||
1270 | * to send that tile the specified message. In order to successfully | ||
1271 | * receive the message, the receiver must be a valid tile to which the | ||
1272 | * sender has access, must not be the sending tile itself, and must have | ||
1273 | * sufficient free buffer space. (The hypervisor guarantees that each | ||
1274 | * tile which has called hv_register_message_state() will be able to | ||
1275 | * buffer one message from every other tile which can legally send to it; | ||
1276 | * more space may be provided but is not guaranteed.) If an invalid tile | ||
1277 | * is specified, the recipient's state is set to HV_BAD_RECIP; this is a | ||
1278 | * permanent delivery error. If the message is successfully delivered | ||
1279 | * to the recipient's buffer, the recipient's state is set to HV_SENT. | ||
1280 | * Otherwise, the recipient's state is unchanged. Message delivery is | ||
1281 | * synchronous; all attempts to send messages are completed before this | ||
1282 | * routine returns. | ||
1283 | * | ||
1284 | * If no permanent delivery errors were encountered, the routine returns | ||
1285 | * the number of messages successfully sent: that is, the number of | ||
1286 | * recipients whose states changed from HV_TO_BE_SENT to HV_SENT during | ||
1287 | * this operation. If any permanent delivery errors were encountered, | ||
1288 | * the routine returns HV_ERECIP. In the event of permanent delivery | ||
1289 | * errors, it may be the case that delivery was not attempted to all | ||
1290 | * recipients; if any messages were successfully delivered, however, | ||
1291 | * recipients' state values will be updated appropriately. | ||
1292 | * | ||
1293 | * It is explicitly legal to specify a recipient structure whose state | ||
1294 | * is not HV_TO_BE_SENT; such a recipient is ignored. One suggested way | ||
1295 | * of using hv_send_message to send a message to multiple tiles is to set | ||
1296 | * up a list of recipients, and then call the routine repeatedly with the | ||
1297 | * same list, each time accumulating the number of messages successfully | ||
1298 | * sent, until all messages are sent, a permanent error is encountered, | ||
1299 | * or the desired number of attempts have been made. When used in this | ||
1300 | * way, the routine will deliver each message no more than once to each | ||
1301 | * recipient. | ||
1302 | * | ||
1303 | * Note that a message being successfully delivered to the recipient's | ||
1304 | * buffer space does not guarantee that it is received by the recipient, | ||
1305 | * either immediately or at any time in the future; the recipient might | ||
1306 | * never call hv_receive_message, or could register a different state | ||
1307 | * buffer, losing the message. | ||
1308 | * | ||
1309 | * Specifying the same recipient more than once in the recipient list | ||
1310 | * is an error, which will not result in an error return but which may | ||
1311 | * or may not result in more than one message being delivered to the | ||
1312 | * recipient tile. | ||
1313 | * | ||
1314 | * buf and buflen specify the message to be sent. buf is a virtual address | ||
1315 | * which must be currently mapped in the client's page table; if not, the | ||
1316 | * routine returns HV_EFAULT. buflen must be greater than zero and less | ||
1317 | * than or equal to HV_MAX_MESSAGE_SIZE, and nrecip must be less than the | ||
1318 | * number of tiles to which the sender has access; if not, the routine | ||
1319 | * returns HV_EINVAL. | ||
1320 | * @param recips List of recipients. | ||
1321 | * @param nrecip Number of recipients. | ||
1322 | * @param buf Address of message data. | ||
1323 | * @param buflen Length of message data. | ||
1324 | **/ | ||
1325 | int hv_send_message(HV_Recipient *recips, int nrecip, | ||
1326 | HV_VirtAddr buf, int buflen); | ||
1327 | |||
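A hedged sketch of the retry pattern suggested above: call hv_send_message() repeatedly with the same recipient list, accumulating the count of newly sent messages (editorial; the attempt limit is an illustrative policy, not part of the interface):

/* Hedged sketch: send one message to every recipient, with retries. */
static int send_to_all(HV_Recipient *recips, int nrecip,
                       HV_VirtAddr buf, int buflen, int max_attempts)
{
    int sent = 0, attempt;

    for (attempt = 0; attempt < max_attempts && sent < nrecip; attempt++) {
        int rc = hv_send_message(recips, nrecip, buf, buflen);
        if (rc < 0)
            return rc;          /* HV_ERECIP or another permanent error */
        sent += rc;             /* entries already HV_SENT are ignored */
    }
    return sent;
}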
1328 | /** Maximum hypervisor message size, in bytes */ | ||
1329 | #define HV_MAX_MESSAGE_SIZE 28 | ||
1330 | |||
1331 | |||
1332 | /** Return value from hv_receive_message() */ | ||
1333 | typedef struct | ||
1334 | { | ||
1335 | int msglen; /**< Message length in bytes, or an error code */ | ||
1336 | __hv32 source; /**< Code identifying message sender (HV_MSG_xxx) */ | ||
1337 | } HV_RcvMsgInfo; | ||
1338 | |||
1339 | #define HV_MSG_TILE 0x0 /**< Message source is another tile */ | ||
1340 | #define HV_MSG_INTR 0x1 /**< Message source is a driver interrupt */ | ||
1341 | |||
1342 | /** Receive a message. | ||
1343 | * | ||
1344 | * This routine retrieves a message from the client's incoming message | ||
1345 | * buffer. | ||
1346 | * | ||
1347 | * Multiple messages sent from a particular sending tile to a particular | ||
1348 | * receiving tile are received in the order that they were sent; however, | ||
1349 | * no ordering is guaranteed between messages sent by different tiles. | ||
1350 | * | ||
1351 | * Whenever a client's message buffer is empty, the first message | ||
1352 | * subsequently received will cause the client's MESSAGE_RCV_DWNCL | ||
1353 | * interrupt vector to be invoked through the interrupt downcall mechanism | ||
1354 | * (see the description of the hv_downcall_dispatch() routine for details | ||
1355 | * on downcalls). | ||
1356 | * | ||
1357 | * Another message-available downcall will not occur until a call to | ||
1358 | * this routine is made when the message buffer is empty, and a message | ||
1359 | * subsequently arrives. Note that such a downcall could occur while | ||
1360 | * this routine is executing. If the calling code does not wish this | ||
1361 | * to happen, it is recommended that this routine be called with the | ||
1362 | * INTCTRL_1 interrupt masked, or inside an interrupt critical section. | ||
1363 | * | ||
1364 | * msgstate is the value previously passed to hv_register_message_state(). | ||
1365 | * buf is the virtual address of the buffer into which the message will | ||
1366 | * be written; buflen is the length of the buffer. | ||
1367 | * | ||
1368 | * This routine returns an HV_RcvMsgInfo structure. The msglen member | ||
1369 | * of that structure is the length of the message received, zero if no | ||
1370 | * message is available, or HV_E2BIG if the message is too large for the | ||
1371 | * specified buffer. If the message is too large, it is not consumed, | ||
1372 | * and may be retrieved by a subsequent call to this routine specifying | ||
1373 | * a sufficiently large buffer. A buffer which is HV_MAX_MESSAGE_SIZE | ||
1374 | * bytes long is guaranteed to be able to receive any possible message. | ||
1375 | * | ||
1376 | * The source member of the HV_RcvMsgInfo structure describes the sender | ||
1377 | * of the message. For messages sent by another client tile via an | ||
1378 | * hv_send_message() call, this value is HV_MSG_TILE; for messages sent | ||
1379 | * as a result of a device interrupt, this value is HV_MSG_INTR. | ||
1380 | */ | ||
1381 | |||
1382 | HV_RcvMsgInfo hv_receive_message(HV_MsgState msgstate, HV_VirtAddr buf, | ||
1383 | int buflen); | ||
1384 | |||
1385 | |||
1386 | /** Start remaining tiles owned by this supervisor. Initially, only one tile | ||
1387 | * executes the client program; after it calls this service, the other tiles | ||
1388 | * are started. This allows the initial tile to do one-time configuration | ||
1389 | * of shared data structures without having to lock them against simultaneous | ||
1390 | * access. | ||
1391 | */ | ||
1392 | void hv_start_all_tiles(void); | ||
1393 | |||
1394 | |||
1395 | /** Open a hypervisor device. | ||
1396 | * | ||
1397 | * This service initializes an I/O device and its hypervisor driver software, | ||
1398 | * and makes it available for use. The open operation is per-device per-chip; | ||
1399 | * once it has been performed, the device handle returned may be used in other | ||
1400 | * device services calls made by any tile. | ||
1401 | * | ||
1402 | * @param name Name of the device. A base device name is just a text string | ||
1403 | * (say, "pcie"). If there is more than one instance of a device, the | ||
1404 | * base name is followed by a slash and a device number (say, "pcie/0"). | ||
1405 | * Some devices may support further structure beneath those components; | ||
1406 | * most notably, devices which require control operations do so by | ||
1407 | * supporting reads and/or writes to a control device whose name | ||
1408 | * includes a trailing "/ctl" (say, "pcie/0/ctl"). | ||
1409 | * @param flags Flags (HV_DEV_xxx). | ||
1410 | * @return A positive integer device handle, or a negative error code. | ||
1411 | */ | ||
1412 | int hv_dev_open(HV_VirtAddr name, __hv32 flags); | ||
1413 | |||
1414 | |||
1415 | /** Close a hypervisor device. | ||
1416 | * | ||
1417 | * This service uninitializes an I/O device and its hypervisor driver | ||
1418 | * software, and makes it unavailable for use. The close operation is | ||
1419 | * per-device per-chip; once it has been performed, the device is no longer | ||
1420 | * available. Normally there is no need to ever call the close service. | ||
1421 | * | ||
1422 | * @param devhdl Device handle of the device to be closed. | ||
1423 | * @return Zero if the close is successful, otherwise, a negative error code. | ||
1424 | */ | ||
1425 | int hv_dev_close(int devhdl); | ||
1426 | |||
1427 | |||
1428 | /** Read data from a hypervisor device synchronously. | ||
1429 | * | ||
1430 | * This service transfers data from a hypervisor device to a memory buffer. | ||
1431 | * When the service returns, the data has been written to the memory buffer, | ||
1432 | * and the buffer will not be further modified by the driver. | ||
1433 | * | ||
1434 | * No ordering is guaranteed between requests issued from different tiles. | ||
1435 | * | ||
1436 | * Devices may choose to support both the synchronous and asynchronous read | ||
1437 | * operations, only one of them, or neither of them. | ||
1438 | * | ||
1439 | * @param devhdl Device handle of the device to be read from. | ||
1440 | * @param flags Flags (HV_DEV_xxx). | ||
1441 | * @param va Virtual address of the target data buffer. This buffer must | ||
1442 | * be mapped in the currently installed page table; if not, HV_EFAULT | ||
1443 | * may be returned. | ||
1444 | * @param len Number of bytes to be transferred. | ||
1445 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1446 | * often a byte offset from the beginning of the device; in other cases, | ||
1447 | * like on a control device, it may have a different meaning. | ||
1448 | * @return A non-negative value if the read was at least partially successful; | ||
1449 | * otherwise, a negative error code. The precise interpretation of | ||
1450 | * the return value is driver-dependent, but many drivers will return | ||
1451 | * the number of bytes successfully transferred. | ||
1452 | */ | ||
1453 | int hv_dev_pread(int devhdl, __hv32 flags, HV_VirtAddr va, __hv32 len, | ||
1454 | __hv64 offset); | ||
1455 | |||
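A hedged sketch of the synchronous open/read/close sequence (editorial; passing zero flags requests the default blocking behavior, and closing the device afterwards is optional per the comment on hv_dev_close()):

/* Hedged sketch: open a device by name and read len bytes at offset 0. */
static int read_device_once(HV_VirtAddr name, HV_VirtAddr buf, __hv32 len)
{
    int devhdl = hv_dev_open(name, 0);
    int rc;

    if (devhdl < 0)
        return devhdl;          /* negative error code from open */

    rc = hv_dev_pread(devhdl, 0 /* flags: block until done */, buf, len, 0);
    hv_dev_close(devhdl);
    return rc;                  /* typically the byte count transferred */
}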
1456 | #define HV_DEV_NB_EMPTY 0x1 /**< Don't block when no bytes of data can | ||
1457 | be transferred. */ | ||
1458 | #define HV_DEV_NB_PARTIAL 0x2 /**< Don't block when some bytes, but not all | ||
1459 | of the requested bytes, can be | ||
1460 | transferred. */ | ||
1461 | #define HV_DEV_NOCACHE 0x4 /**< The caller warrants that none of the | ||
1462 | cache lines which might contain data | ||
1463 | from the requested buffer are valid. | ||
1464 | Useful with asynchronous operations | ||
1465 | only. */ | ||
1466 | |||
1467 | #define HV_DEV_ALLFLAGS (HV_DEV_NB_EMPTY | HV_DEV_NB_PARTIAL | \ | ||
1468 | HV_DEV_NOCACHE) /**< All HV_DEV_xxx flags */ | ||
1469 | |||
1470 | /** Write data to a hypervisor device synchronously. | ||
1471 | * | ||
1472 | * This service transfers data from a memory buffer to a hypervisor device. | ||
1473 | * When the service returns, the data has been read from the memory buffer, | ||
1474 | * and the buffer may be overwritten by the client; the data may not | ||
1475 | * necessarily have been conveyed to the actual hardware I/O interface. | ||
1476 | * | ||
1477 | * No ordering is guaranteed between requests issued from different tiles. | ||
1478 | * | ||
1479 | * Devices may choose to support both the synchronous and asynchronous write | ||
1480 | * operations, only one of them, or neither of them. | ||
1481 | * | ||
1482 | * @param devhdl Device handle of the device to be written to. | ||
1483 | * @param flags Flags (HV_DEV_xxx). | ||
1484 | * @param va Virtual address of the source data buffer. This buffer must | ||
1485 | * be mapped in the currently installed page table; if not, HV_EFAULT | ||
1486 | * may be returned. | ||
1487 | * @param len Number of bytes to be transferred. | ||
1488 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1489 | * often a byte offset from the beginning of the device; in other cases, | ||
1490 | * like on a control device, it may have a different meaning. | ||
1491 | * @return A non-negative value if the write was at least partially successful; | ||
1492 | * otherwise, a negative error code. The precise interpretation of | ||
1493 | * the return value is driver-dependent, but many drivers will return | ||
1494 | * the number of bytes successfully transferred. | ||
1495 | */ | ||
1496 | int hv_dev_pwrite(int devhdl, __hv32 flags, HV_VirtAddr va, __hv32 len, | ||
1497 | __hv64 offset); | ||
1498 | |||
1499 | |||
1500 | /** Interrupt arguments, used in the asynchronous I/O interfaces. */ | ||
1501 | #if CHIP_VA_WIDTH() > 32 | ||
1502 | typedef __hv64 HV_IntArg; | ||
1503 | #else | ||
1504 | typedef __hv32 HV_IntArg; | ||
1505 | #endif | ||
1506 | |||
1507 | /** Interrupt messages are delivered via the same mechanism as normal | ||
1508 | * messages, but have a message source of HV_MSG_INTR. The message is formatted | ||
1509 | * as an HV_IntrMsg structure. | ||
1510 | */ | ||
1511 | |||
1512 | typedef struct | ||
1513 | { | ||
1514 | HV_IntArg intarg; /**< Interrupt argument, passed to the poll/preada/pwritea | ||
1515 | services */ | ||
1516 | HV_IntArg intdata; /**< Interrupt-specific interrupt data */ | ||
1517 | } HV_IntrMsg; | ||
1518 | |||
1519 | /** Request an interrupt message when a device condition is satisfied. | ||
1520 | * | ||
1521 | * This service requests that an interrupt message be delivered to the | ||
1522 | * requesting tile when a device becomes readable or writable, or when any | ||
1523 | * data queued to the device via previous write operations from this tile | ||
1524 | * has been actually sent out on the hardware I/O interface. Devices may | ||
1525 | * choose to support any, all, or none of the available conditions. | ||
1526 | * | ||
1527 | * If multiple conditions are specified, only one message will be | ||
1528 | * delivered. If the event mask delivered to that interrupt handler | ||
1529 | * indicates that some of the conditions have not yet occurred, the | ||
1530 | * client must issue another poll() call if it wishes to wait for those | ||
1531 | * conditions. | ||
1532 | * | ||
1533 | * Only one poll may be outstanding per device handle per tile. If more than | ||
1534 | * one tile is polling on the same device and condition, they will all be | ||
1535 | * notified when it happens. Because of this, clients may not assume that | ||
1536 | * the condition signaled is necessarily still true when they request a | ||
1537 | * subsequent service; for instance, the readable data which caused the | ||
1538 | * poll call to interrupt may have been read by another tile in the interim. | ||
1539 | * | ||
1540 | * The notification interrupt message could come directly, or via the | ||
1541 | * downcall (intctrl1) method, depending on what the tile is doing | ||
1542 | * when the condition is satisfied. Note that it is possible for the | ||
1543 | * requested interrupt to be delivered after this service is called but | ||
1544 | * before it returns. | ||
1545 | * | ||
1546 | * @param devhdl Device handle of the device to be polled. | ||
1547 | * @param events Flags denoting the events which will cause the interrupt to | ||
1548 | * be delivered (HV_DEVPOLL_xxx). | ||
1549 | * @param intarg Value which will be delivered as the intarg member of the | ||
1550 | * eventual interrupt message; the intdata member will be set to a | ||
1551 | * mask of HV_DEVPOLL_xxx values indicating which conditions have been | ||
1552 | * satisfied. | ||
1553 | * @return Zero if the interrupt was successfully scheduled; otherwise, a | ||
1554 | * negative error code. | ||
1555 | */ | ||
1556 | int hv_dev_poll(int devhdl, __hv32 events, HV_IntArg intarg); | ||
1557 | |||
1558 | #define HV_DEVPOLL_READ 0x1 /**< Test device for readability */ | ||
1559 | #define HV_DEVPOLL_WRITE 0x2 /**< Test device for writability */ | ||
1560 | #define HV_DEVPOLL_FLUSH 0x4 /**< Test device for output drained */ | ||
1561 | |||
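A hedged sketch of arming a poll and inspecting the resulting interrupt message (editorial; how the interrupt message reaches the handler, and how intarg is mapped back to a device handle, are left to the client and are assumptions here):

/* Hedged sketch: request notification and examine the satisfied-events mask. */
static int arm_device_poll(int devhdl, HV_IntArg cookie)
{
    return hv_dev_poll(devhdl, HV_DEVPOLL_READ | HV_DEVPOLL_WRITE, cookie);
}

static void on_device_intr(int devhdl, const HV_IntrMsg *msg)
{
    if (msg->intdata & HV_DEVPOLL_READ) {
        /* Device was readable; the data may already be gone if another
         * tile read it first, so a non-blocking read is advisable. */
    }
    if (!(msg->intdata & HV_DEVPOLL_WRITE)) {
        /* The other condition has not occurred yet; poll again for it. */
        hv_dev_poll(devhdl, HV_DEVPOLL_WRITE, msg->intarg);
    }
}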
1562 | |||
1563 | /** Cancel a request for an interrupt when a device event occurs. | ||
1564 | * | ||
1565 | * This service requests that no interrupt be delivered when the events | ||
1566 | * noted in the last-issued poll() call happen. Once this service returns, | ||
1567 | * the interrupt has been canceled; however, it is possible for the interrupt | ||
1568 | * to be delivered after this service is called but before it returns. | ||
1569 | * | ||
1570 | * @param devhdl Device handle of the device on which to cancel polling. | ||
1571 | * @return Zero if the poll was successfully canceled; otherwise, a negative | ||
1572 | * error code. | ||
1573 | */ | ||
1574 | int hv_dev_poll_cancel(int devhdl); | ||
1575 | |||
1576 | |||
1577 | /** Scatter-gather list for preada/pwritea calls. */ | ||
1578 | typedef struct | ||
1579 | #if CHIP_VA_WIDTH() <= 32 | ||
1580 | __attribute__ ((packed, aligned(4))) | ||
1581 | #endif | ||
1582 | { | ||
1583 | HV_PhysAddr pa; /**< Client physical address of the buffer segment. */ | ||
1584 | HV_PTE pte; /**< Page table entry describing the caching and location | ||
1585 | override characteristics of the buffer segment. Some | ||
1586 | drivers ignore this element and will require that | ||
1587 | the NOCACHE flag be set on their requests. */ | ||
1588 | __hv32 len; /**< Length of the buffer segment. */ | ||
1589 | } HV_SGL; | ||
1590 | |||
1591 | #define HV_SGL_MAXLEN 16 /**< Maximum number of entries in a scatter-gather | ||
1592 | list */ | ||
1593 | |||
1594 | /** Read data from a hypervisor device asynchronously. | ||
1595 | * | ||
1596 | * This service transfers data from a hypervisor device to a memory buffer. | ||
1597 | * When the service returns, the read has been scheduled. When the read | ||
1598 | * completes, an interrupt message will be delivered, and the buffer will | ||
1599 | * not be further modified by the driver. | ||
1600 | * | ||
1601 | * The number of possible outstanding asynchronous requests is defined by | ||
1602 | * each driver, but it is recommended that it be at least two requests | ||
1603 | * per tile per device. | ||
1604 | * | ||
1605 | * No ordering is guaranteed between synchronous and asynchronous requests, | ||
1606 | * even those issued on the same tile. | ||
1607 | * | ||
1608 | * The completion interrupt message could come directly, or via the downcall | ||
1609 | * (intctrl1) method, depending on what the tile is doing when the read | ||
1610 | * completes. Interrupts do not coalesce; one is delivered for each | ||
1611 | * asynchronous I/O request. Note that it is possible for the requested | ||
1612 | * interrupt to be delivered after this service is called but before it | ||
1613 | * returns. | ||
1614 | * | ||
1615 | * Devices may choose to support both the synchronous and asynchronous read | ||
1616 | * operations, only one of them, or neither of them. | ||
1617 | * | ||
1618 | * @param devhdl Device handle of the device to be read from. | ||
1619 | * @param flags Flags (HV_DEV_xxx). | ||
1620 | * @param sgl_len Number of elements in the scatter-gather list. | ||
1621 | * @param sgl Scatter-gather list describing the memory to which data will be | ||
1622 | * written. | ||
1623 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1624 | * often a byte offset from the beginning of the device; in other cases, | ||
1625 | * like on a control device, it may have a different meaning. | ||
1626 | * @param intarg Value which will be delivered as the intarg member of the | ||
1627 | * eventual interrupt message; the intdata member will be set to the | ||
1628 | * normal return value from the read request. | ||
1629 | * @return Zero if the read was successfully scheduled; otherwise, a negative | ||
1630 | * error code. Note that some drivers may choose to pre-validate | ||
1631 | * their arguments, and may thus detect certain device error | ||
1632 | * conditions at this time rather than when the completion notification | ||
1633 | * occurs, but this is not required. | ||
1634 | */ | ||
1635 | int hv_dev_preada(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
1636 | HV_SGL sgl[/* sgl_len */], __hv64 offset, HV_IntArg intarg); | ||
1637 | |||
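A hedged sketch of an asynchronous read through a one-entry scatter-gather list (editorial; the caller is assumed to have prepared a suitable PTE and to honor the HV_DEV_NOCACHE warranty described above):

/* Hedged sketch: schedule an asynchronous read into one buffer segment. */
static int start_async_read(int devhdl, HV_PhysAddr pa, HV_PTE pte,
                            __hv32 len, __hv64 offset, HV_IntArg cookie)
{
    HV_SGL sgl[1];

    sgl[0].pa  = pa;
    sgl[0].pte = pte;       /* caching/location override for the segment */
    sgl[0].len = len;

    /* Completion arrives later as an interrupt message carrying cookie. */
    return hv_dev_preada(devhdl, HV_DEV_NOCACHE, 1, sgl, offset, cookie);
}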
1638 | |||
1639 | /** Write data to a hypervisor device asynchronously. | ||
1640 | * | ||
1641 | * This service transfers data from a memory buffer to a hypervisor | ||
1642 | * device. When the service returns, the write has been scheduled. | ||
1643 | * When the write completes, an interrupt message will be delivered, | ||
1644 | * and the buffer may be overwritten by the client; the data may not | ||
1645 | * necessarily have been conveyed to the actual hardware I/O interface. | ||
1646 | * | ||
1647 | * The number of possible outstanding asynchronous requests is defined by | ||
1648 | * each driver, but it is recommended that it be at least two requests | ||
1649 | * per tile per device. | ||
1650 | * | ||
1651 | * No ordering is guaranteed between synchronous and asynchronous requests, | ||
1652 | * even those issued on the same tile. | ||
1653 | * | ||
1654 | * The completion interrupt message could come directly, or via the downcall | ||
1655 | * (intctrl1) method, depending on what the tile is doing when the write | ||
1656 | * completes. Interrupts do not coalesce; one is delivered for each | ||
1657 | * asynchronous I/O request. Note that it is possible for the requested | ||
1658 | * interrupt to be delivered after this service is called but before it | ||
1659 | * returns. | ||
1660 | * | ||
1661 | * Devices may choose to support both the synchronous and asynchronous write | ||
1662 | * operations, only one of them, or neither of them. | ||
1663 | * | ||
1664 | * @param devhdl Device handle of the device to be written to. | ||
1665 | * @param flags Flags (HV_DEV_xxx). | ||
1666 | * @param sgl_len Number of elements in the scatter-gather list. | ||
1667 | * @param sgl Scatter-gather list describing the memory from which data will be | ||
1668 | * read. | ||
1669 | * @param offset Driver-dependent offset. For a random-access device, this is | ||
1670 | * often a byte offset from the beginning of the device; in other cases, | ||
1671 | * like on a control device, it may have a different meaning. | ||
1672 | * @param intarg Value which will be delivered as the intarg member of the | ||
1673 | * eventual interrupt message; the intdata member will be set to the | ||
1674 | * normal return value from the write request. | ||
1675 | * @return Zero if the write was successfully scheduled; otherwise, a negative | ||
1676 | * error code. Note that some drivers may choose to pre-validate | ||
1677 | * their arguments, and may thus detect certain device error | ||
1678 | * conditions at this time rather than when the completion notification | ||
1679 | * occurs, but this is not required. | ||
1680 | */ | ||
1681 | int hv_dev_pwritea(int devhdl, __hv32 flags, __hv32 sgl_len, | ||
1682 | HV_SGL sgl[/* sgl_len */], __hv64 offset, HV_IntArg intarg); | ||
1683 | |||
1684 | |||
1685 | /** Define a pair of tile and ASID to identify a user process context. */ | ||
1686 | typedef struct | ||
1687 | { | ||
1688 | /** X coordinate, relative to supervisor's top-left coordinate */ | ||
1689 | unsigned int x:11; | ||
1690 | |||
1691 | /** Y coordinate, relative to supervisor's top-left coordinate */ | ||
1692 | unsigned int y:11; | ||
1693 | |||
1694 | /** ASID of the process on this x,y tile */ | ||
1695 | HV_ASID asid:10; | ||
1696 | } HV_Remote_ASID; | ||
1697 | |||
1698 | /** Flush cache and/or TLB state on remote tiles. | ||
1699 | * | ||
1700 | * @param cache_pa Client physical address to flush from cache (ignored if | ||
1701 | * the length encoded in cache_control is zero, or if | ||
1702 | * HV_FLUSH_EVICT_L2 is set, or if cache_cpumask is NULL). | ||
1703 | * @param cache_control This argument allows you to specify a length of | ||
1704 | * physical address space to flush (maximum HV_FLUSH_MAX_CACHE_LEN). | ||
1705 | * You can "or" in HV_FLUSH_EVICT_L2 to flush the whole L2 cache. | ||
1706 | * You can "or" in HV_FLUSH_EVICT_LI1 to flush the whole LII cache. | ||
1707 | * HV_FLUSH_ALL flushes all caches. | ||
1708 | * @param cache_cpumask Bitmask (in row-major order, supervisor-relative) of | ||
1709 | * tile indices to perform cache flush on. The low bit of the first | ||
1710 | * word corresponds to the tile at the upper left-hand corner of the | ||
1711 | * supervisor's rectangle. If passed as a NULL pointer, equivalent | ||
1712 | * to an empty bitmask. On chips which support hash-for-home caching, | ||
1713 | * if passed as -1, equivalent to a mask containing tiles which could | ||
1714 | * be doing hash-for-home caching. | ||
1715 | * @param tlb_va Virtual address to flush from TLB (ignored if | ||
1716 | * tlb_length is zero or tlb_cpumask is NULL). | ||
1717 | * @param tlb_length Number of bytes of data to flush from the TLB. | ||
1718 | * @param tlb_pgsize Page size to use for TLB flushes. | ||
1719 | * tlb_va and tlb_length need not be aligned to this size. | ||
1720 | * @param tlb_cpumask Bitmask for tlb flush, like cache_cpumask. | ||
1721 | * If passed as a NULL pointer, equivalent to an empty bitmask. | ||
1722 | * @param asids Pointer to an HV_Remote_ASID array of tile/ASID pairs to flush. | ||
1723 | * @param asidcount Number of HV_Remote_ASID entries in asids[]. | ||
1724 | * @return Zero for success, or else HV_EINVAL or HV_EFAULT for errors that | ||
1725 | * are detected while parsing the arguments. | ||
1726 | */ | ||
1727 | int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control, | ||
1728 | unsigned long* cache_cpumask, | ||
1729 | HV_VirtAddr tlb_va, unsigned long tlb_length, | ||
1730 | unsigned long tlb_pgsize, unsigned long* tlb_cpumask, | ||
1731 | HV_Remote_ASID* asids, int asidcount); | ||
1732 | |||
1733 | /** Include in cache_control to ensure a flush of the entire L2. */ | ||
1734 | #define HV_FLUSH_EVICT_L2 (1UL << 31) | ||
1735 | |||
1736 | /** Include in cache_control to ensure a flush of the entire L1I. */ | ||
1737 | #define HV_FLUSH_EVICT_L1I (1UL << 30) | ||
1738 | |||
1739 | /** Maximum legal size to use for the "length" component of cache_control. */ | ||
1740 | #define HV_FLUSH_MAX_CACHE_LEN ((1UL << 30) - 1) | ||
1741 | |||
1742 | /** Use for cache_control to ensure a flush of all caches. */ | ||
1743 | #define HV_FLUSH_ALL -1UL | ||
1744 | |||
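For orientation, a hedged sketch of one common use of hv_flush_remote(): flush a single small page from the TLBs of a set of tiles and evict the whole L2 on those same tiles. It assumes the HV_PAGE_SIZE_SMALL constant defined earlier in this header and a caller-built cpumask in the supervisor-relative, row-major layout described above.

```c
#include <hv/hypervisor.h>

/* Sketch: flush the TLB entry covering 'va' (one small page) on the tiles
 * named in 'mask', and evict the entire L2 on those tiles as well.  The
 * ASID list lets the hypervisor also flush per-process translations. */
static int flush_page_on_tiles(HV_VirtAddr va, unsigned long *mask,
                               HV_Remote_ASID *asids, int asidcount)
{
	/* No specific PA range to flush; just request a whole-L2 evict. */
	unsigned long cache_control = HV_FLUSH_EVICT_L2;

	return hv_flush_remote(0, cache_control, mask,
	                       va, HV_PAGE_SIZE_SMALL, HV_PAGE_SIZE_SMALL,
	                       mask, asids, asidcount);
}
```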
1745 | #else /* __ASSEMBLER__ */ | ||
1746 | |||
1747 | /** Include in cache_control to ensure a flush of the entire L2. */ | ||
1748 | #define HV_FLUSH_EVICT_L2 (1 << 31) | ||
1749 | |||
1750 | /** Include in cache_control to ensure a flush of the entire L1I. */ | ||
1751 | #define HV_FLUSH_EVICT_L1I (1 << 30) | ||
1752 | |||
1753 | /** Maximum legal size to use for the "length" component of cache_control. */ | ||
1754 | #define HV_FLUSH_MAX_CACHE_LEN ((1 << 30) - 1) | ||
1755 | |||
1756 | /** Use for cache_control to ensure a flush of all caches. */ | ||
1757 | #define HV_FLUSH_ALL -1 | ||
1758 | |||
1759 | #endif /* __ASSEMBLER__ */ | ||
1760 | |||
1761 | #ifndef __ASSEMBLER__ | ||
1762 | |||
1763 | /** Return a 64-bit value corresponding to the PTE if needed */ | ||
1764 | #define hv_pte_val(pte) ((pte).val) | ||
1765 | |||
1766 | /** Cast a 64-bit value to an HV_PTE */ | ||
1767 | #define hv_pte(val) ((HV_PTE) { val }) | ||
1768 | |||
1769 | #endif /* !__ASSEMBLER__ */ | ||
1770 | |||
1771 | |||
1772 | /** Bits in the size of an HV_PTE */ | ||
1773 | #define HV_LOG2_PTE_SIZE 3 | ||
1774 | |||
1775 | /** Size of an HV_PTE */ | ||
1776 | #define HV_PTE_SIZE (1 << HV_LOG2_PTE_SIZE) | ||
1777 | |||
1778 | |||
1779 | /* Bits in HV_PTE's low word. */ | ||
1780 | #define HV_PTE_INDEX_PRESENT 0 /**< PTE is valid */ | ||
1781 | #define HV_PTE_INDEX_MIGRATING 1 /**< Page is migrating */ | ||
1782 | #define HV_PTE_INDEX_CLIENT0 2 /**< Page client state 0 */ | ||
1783 | #define HV_PTE_INDEX_CLIENT1 3 /**< Page client state 1 */ | ||
1784 | #define HV_PTE_INDEX_NC 4 /**< L1$/L2$ incoherent with L3$ */ | ||
1785 | #define HV_PTE_INDEX_NO_ALLOC_L1 5 /**< Page is uncached in local L1$ */ | ||
1786 | #define HV_PTE_INDEX_NO_ALLOC_L2 6 /**< Page is uncached in local L2$ */ | ||
1787 | #define HV_PTE_INDEX_CACHED_PRIORITY 7 /**< Page is priority cached */ | ||
1788 | #define HV_PTE_INDEX_PAGE 8 /**< PTE describes a page */ | ||
1789 | #define HV_PTE_INDEX_GLOBAL 9 /**< Page is global */ | ||
1790 | #define HV_PTE_INDEX_USER 10 /**< Page is user-accessible */ | ||
1791 | #define HV_PTE_INDEX_ACCESSED 11 /**< Page has been accessed */ | ||
1792 | #define HV_PTE_INDEX_DIRTY 12 /**< Page has been written */ | ||
1793 | /* Bits 13-15 are reserved for | ||
1794 | future use. */ | ||
1795 | #define HV_PTE_INDEX_MODE 16 /**< Page mode; see HV_PTE_MODE_xxx */ | ||
1796 | #define HV_PTE_MODE_BITS 3 /**< Number of bits in mode */ | ||
1797 | /* Bit 19 is reserved for | ||
1798 | future use. */ | ||
1799 | #define HV_PTE_INDEX_LOTAR 20 /**< Page's LOTAR; must be high bits | ||
1800 | of word */ | ||
1801 | #define HV_PTE_LOTAR_BITS 12 /**< Number of bits in a LOTAR */ | ||
1802 | |||
1803 | /* Bits in HV_PTE's high word. */ | ||
1804 | #define HV_PTE_INDEX_READABLE 32 /**< Page is readable */ | ||
1805 | #define HV_PTE_INDEX_WRITABLE 33 /**< Page is writable */ | ||
1806 | #define HV_PTE_INDEX_EXECUTABLE 34 /**< Page is executable */ | ||
1807 | #define HV_PTE_INDEX_PTFN 35 /**< Page's PTFN; must be high bits | ||
1808 | of word */ | ||
1809 | #define HV_PTE_PTFN_BITS 29 /**< Number of bits in a PTFN */ | ||
1810 | |||
1811 | /** Position of the PFN field within the PTE (subset of the PTFN). */ | ||
1812 | #define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \ | ||
1813 | HV_LOG2_PAGE_TABLE_ALIGN)) | ||
1814 | |||
1815 | /** Length of the PFN field within the PTE (subset of the PTFN). */ | ||
1816 | #define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \ | ||
1817 | (HV_LOG2_PAGE_SIZE_SMALL - \ | ||
1818 | HV_LOG2_PAGE_TABLE_ALIGN)) | ||
1819 | |||
1820 | /* | ||
1821 | * Legal values for the PTE's mode field | ||
1822 | */ | ||
1823 | /** Data is not resident in any caches; loads and stores access memory | ||
1824 | * directly. | ||
1825 | */ | ||
1826 | #define HV_PTE_MODE_UNCACHED 1 | ||
1827 | |||
1828 | /** Data is resident in the tile's local L1 and/or L2 caches; if a load | ||
1829 | * or store misses there, it goes to memory. | ||
1830 | * | ||
1831 | * The copy in the local L1$/L2$ is not invalidated when the copy in | ||
1832 | * memory is changed. | ||
1833 | */ | ||
1834 | #define HV_PTE_MODE_CACHE_NO_L3 2 | ||
1835 | |||
1836 | /** Data is resident in the tile's local L1 and/or L2 caches. If a load | ||
1837 | * or store misses there, it goes to an L3 cache in a designated tile; | ||
1838 | * if it misses there, it goes to memory. | ||
1839 | * | ||
1840 | * If the NC bit is not set, the copy in the local L1$/L2$ is invalidated | ||
1841 | * when the copy in the remote L3$ is changed. Otherwise, such | ||
1842 | * invalidation will not occur. | ||
1843 | * | ||
1844 | * Chips for which CHIP_HAS_COHERENT_LOCAL_CACHE() is 0 do not support | ||
1845 | * invalidation from an L3$ to another tile's L1$/L2$. If the NC bit is | ||
1846 | * clear on such a chip, no copy is kept in the local L1$/L2$ in this mode. | ||
1847 | */ | ||
1848 | #define HV_PTE_MODE_CACHE_TILE_L3 3 | ||
1849 | |||
1850 | /** Data is resident in the tile's local L1 and/or L2 caches. If a load | ||
1851 | * or store misses there, it goes to an L3 cache in one of a set of | ||
1852 | * designated tiles; if it misses there, it goes to memory. Which tile | ||
1853 | * is chosen from the set depends upon a hash function applied to the | ||
1854 | * physical address. This mode is not supported on chips for which | ||
1855 | * CHIP_HAS_CBOX_HOME_MAP() is 0. | ||
1856 | * | ||
1857 | * If the NC bit is not set, the copy in the local L1$/L2$ is invalidated | ||
1858 | * when the copy in the remote L3$ is changed. Otherwise, such | ||
1859 | * invalidation will not occur. | ||
1860 | * | ||
1861 | * Chips for which CHIP_HAS_COHERENT_LOCAL_CACHE() is 0 do not support | ||
1862 | * invalidation from an L3$ to another tile's L1$/L2$. If the NC bit is | ||
1863 | * clear on such a chip, no copy is kept in the local L1$/L2$ in this mode. | ||
1864 | */ | ||
1865 | #define HV_PTE_MODE_CACHE_HASH_L3 4 | ||
1866 | |||
1867 | /** Data is not resident in memory; accesses are instead made to an I/O | ||
1868 | * device, whose tile coordinates are given by the PTE's LOTAR field. | ||
1869 | * This mode is only supported on chips for which CHIP_HAS_MMIO() is 1. | ||
1870 | * The EXECUTABLE bit may not be set in an MMIO PTE. | ||
1871 | */ | ||
1872 | #define HV_PTE_MODE_MMIO 5 | ||
1873 | |||
1874 | |||
1875 | /* C wants 1ULL so it is typed as __hv64, but the assembler needs just numbers. | ||
1876 | * The assembler can't handle shifts greater than 31, but treats them | ||
1877 | * as shifts mod 32, so assembler code must be aware of which word | ||
1878 | * the bit belongs in when using these macros. | ||
1879 | */ | ||
1880 | #ifdef __ASSEMBLER__ | ||
1881 | #define __HV_PTE_ONE 1 /**< One, for assembler */ | ||
1882 | #else | ||
1883 | #define __HV_PTE_ONE 1ULL /**< One, for C */ | ||
1884 | #endif | ||
1885 | |||
1886 | /** Is this PTE present? | ||
1887 | * | ||
1888 | * If this bit is set, this PTE represents a valid translation or level-2 | ||
1889 | * page table pointer. Otherwise, the page table does not contain a | ||
1890 | * translation for the subject virtual pages. | ||
1891 | * | ||
1892 | * If this bit is not set, the other bits in the PTE are not | ||
1893 | * interpreted by the hypervisor, and may contain any value. | ||
1894 | */ | ||
1895 | #define HV_PTE_PRESENT (__HV_PTE_ONE << HV_PTE_INDEX_PRESENT) | ||
1896 | |||
1897 | /** Does this PTE map a page? | ||
1898 | * | ||
1899 | * If this bit is set in the level-1 page table, the entry should be | ||
1900 | * interpreted as a level-2 page table entry mapping a large page. | ||
1901 | * | ||
1902 | * This bit should not be modified by the client while PRESENT is set, as | ||
1903 | * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits. | ||
1904 | * | ||
1905 | * In a level-2 page table, this bit is ignored and must be zero. | ||
1906 | */ | ||
1907 | #define HV_PTE_PAGE (__HV_PTE_ONE << HV_PTE_INDEX_PAGE) | ||
1908 | |||
1909 | /** Is this a global (non-ASID) mapping? | ||
1910 | * | ||
1911 | * If this bit is set, the translations established by this PTE will | ||
1912 | * not be flushed from the TLB by the hv_flush_asid() service; they | ||
1913 | * will be flushed by the hv_flush_page() or hv_flush_pages() services. | ||
1914 | * | ||
1915 | * Setting this bit for translations which are identical in all page | ||
1916 | * tables (for instance, code and data belonging to a client OS) can | ||
1917 | * be very beneficial, as it will reduce the number of TLB misses. | ||
1918 | * Note that, while it is not an error which will be detected by the | ||
1919 | * hypervisor, it is an extremely bad idea to set this bit for | ||
1920 | * translations which are _not_ identical in all page tables. | ||
1921 | * | ||
1922 | * This bit should not be modified by the client while PRESENT is set, as | ||
1923 | * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits. | ||
1924 | * | ||
1925 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1926 | */ | ||
1927 | #define HV_PTE_GLOBAL (__HV_PTE_ONE << HV_PTE_INDEX_GLOBAL) | ||
1928 | |||
1929 | /** Is this mapping accessible to users? | ||
1930 | * | ||
1931 | * If this bit is set, code running at any PL will be permitted to | ||
1932 | * access the virtual addresses mapped by this PTE. Otherwise, only | ||
1933 | * code running at PL 1 or above will be allowed to do so. | ||
1934 | * | ||
1935 | * This bit should not be modified by the client while PRESENT is set, as | ||
1936 | * doing so may race with the hypervisor's update of ACCESSED and DIRTY bits. | ||
1937 | * | ||
1938 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1939 | */ | ||
1940 | #define HV_PTE_USER (__HV_PTE_ONE << HV_PTE_INDEX_USER) | ||
1941 | |||
1942 | /** Has this mapping been accessed? | ||
1943 | * | ||
1944 | * This bit is set by the hypervisor when the memory described by the | ||
1945 | * translation is accessed for the first time. It is never cleared by | ||
1946 | * the hypervisor, but may be cleared by the client. After the bit | ||
1947 | * has been cleared, subsequent references are not guaranteed to set | ||
1948 | * it again until the translation has been flushed from the TLB. | ||
1949 | * | ||
1950 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1951 | */ | ||
1952 | #define HV_PTE_ACCESSED (__HV_PTE_ONE << HV_PTE_INDEX_ACCESSED) | ||
1953 | |||
1954 | /** Is this mapping dirty? | ||
1955 | * | ||
1956 | * This bit is set by the hypervisor when the memory described by the | ||
1957 | * translation is written for the first time. It is never cleared by | ||
1958 | * the hypervisor, but may be cleared by the client. After the bit | ||
1959 | * has been cleared, subsequent references are not guaranteed to set | ||
1960 | * it again until the translation has been flushed from the TLB. | ||
1961 | * | ||
1962 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
1963 | */ | ||
1964 | #define HV_PTE_DIRTY (__HV_PTE_ONE << HV_PTE_INDEX_DIRTY) | ||
1965 | |||
1966 | /** Migrating bit in PTE. | ||
1967 | * | ||
1968 | * This bit is guaranteed not to be inspected or modified by the | ||
1969 | * hypervisor. The name is indicative of the suggested use by the client | ||
1970 | * to tag pages whose L3 cache is being migrated from one cpu to another. | ||
1971 | */ | ||
1972 | #define HV_PTE_MIGRATING (__HV_PTE_ONE << HV_PTE_INDEX_MIGRATING) | ||
1973 | |||
1974 | /** Client-private bit in PTE. | ||
1975 | * | ||
1976 | * This bit is guaranteed not to be inspected or modified by the | ||
1977 | * hypervisor. | ||
1978 | */ | ||
1979 | #define HV_PTE_CLIENT0 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT0) | ||
1980 | |||
1981 | /** Client-private bit in PTE. | ||
1982 | * | ||
1983 | * This bit is guaranteed not to be inspected or modified by the | ||
1984 | * hypervisor. | ||
1985 | */ | ||
1986 | #define HV_PTE_CLIENT1 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1) | ||
1987 | |||
1988 | /** Non-coherent (NC) bit in PTE. | ||
1989 | * | ||
1990 | * If this bit is set, the mapping that is set up will be non-coherent | ||
1991 | * (also known as non-inclusive). This means that changes to the L3 | ||
1992 | * cache will not cause a local copy to be invalidated. It is generally | ||
1993 | * recommended only for read-only mappings. | ||
1994 | * | ||
1995 | * In level-1 PTEs, if the Page bit is clear, this bit determines how the | ||
1996 | * level-2 page table is accessed. | ||
1997 | */ | ||
1998 | #define HV_PTE_NC (__HV_PTE_ONE << HV_PTE_INDEX_NC) | ||
1999 | |||
2000 | /** Is this page prevented from filling the L1$? | ||
2001 | * | ||
2002 | * If this bit is set, the page described by the PTE will not be cached | ||
2003 | * in the local cpu's L1 cache. | ||
2004 | * | ||
2005 | * If CHIP_HAS_NC_AND_NOALLOC_BITS() is not true in <chip.h> for this chip, | ||
2006 | * it is illegal to use this attribute, and may cause client termination. | ||
2007 | * | ||
2008 | * In level-1 PTEs, if the Page bit is clear, this bit | ||
2009 | * determines how the level-2 page table is accessed. | ||
2010 | */ | ||
2011 | #define HV_PTE_NO_ALLOC_L1 (__HV_PTE_ONE << HV_PTE_INDEX_NO_ALLOC_L1) | ||
2012 | |||
2013 | /** Is this page prevented from filling the L2$? | ||
2014 | * | ||
2015 | * If this bit is set, the page described by the PTE will not be cached | ||
2016 | * in the local cpu's L2 cache. | ||
2017 | * | ||
2018 | * If CHIP_HAS_NC_AND_NOALLOC_BITS() is not true in <chip.h> for this chip, | ||
2019 | * it is illegal to use this attribute, and may cause client termination. | ||
2020 | * | ||
2021 | * In level-1 PTEs, if the Page bit is clear, this bit determines how the | ||
2022 | * level-2 page table is accessed. | ||
2023 | */ | ||
2024 | #define HV_PTE_NO_ALLOC_L2 (__HV_PTE_ONE << HV_PTE_INDEX_NO_ALLOC_L2) | ||
2025 | |||
2026 | /** Is this a priority page? | ||
2027 | * | ||
2028 | * If this bit is set, the page described by the PTE will be given | ||
2029 | * priority in the cache. Normally this translates into allowing the | ||
2030 | * page to use only the "red" half of the cache. The client may wish to | ||
2031 | * then use the hv_set_caching service to specify that other pages which | ||
2032 | * alias this page will use only the "black" half of the cache. | ||
2033 | * | ||
2034 | * If the Cached Priority bit is clear, the hypervisor uses the | ||
2035 | * current hv_set_caching() value to choose how to cache the page. | ||
2036 | * | ||
2037 | * It is illegal to set the Cached Priority bit if the Non-Cached bit | ||
2038 | * is set and the Cached Remotely bit is clear, i.e. if requests to | ||
2039 | * the page map directly to memory. | ||
2040 | * | ||
2041 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2042 | */ | ||
2043 | #define HV_PTE_CACHED_PRIORITY (__HV_PTE_ONE << \ | ||
2044 | HV_PTE_INDEX_CACHED_PRIORITY) | ||
2045 | |||
2046 | /** Is this a readable mapping? | ||
2047 | * | ||
2048 | * If this bit is set, code will be permitted to read from (e.g., | ||
2049 | * issue load instructions against) the virtual addresses mapped by | ||
2050 | * this PTE. | ||
2051 | * | ||
2052 | * It is illegal for this bit to be clear if the Writable bit is set. | ||
2053 | * | ||
2054 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2055 | */ | ||
2056 | #define HV_PTE_READABLE (__HV_PTE_ONE << HV_PTE_INDEX_READABLE) | ||
2057 | |||
2058 | /** Is this a writable mapping? | ||
2059 | * | ||
2060 | * If this bit is set, code will be permitted to write to (e.g., issue | ||
2061 | * store instructions against) the virtual addresses mapped by this | ||
2062 | * PTE. | ||
2063 | * | ||
2064 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2065 | */ | ||
2066 | #define HV_PTE_WRITABLE (__HV_PTE_ONE << HV_PTE_INDEX_WRITABLE) | ||
2067 | |||
2068 | /** Is this an executable mapping? | ||
2069 | * | ||
2070 | * If this bit is set, code will be permitted to execute from | ||
2071 | * (e.g., jump to) the virtual addresses mapped by this PTE. | ||
2072 | * | ||
2073 | * This bit applies to any processor on the tile, if there are more | ||
2074 | * than one. | ||
2075 | * | ||
2076 | * This bit is ignored in level-1 PTEs unless the Page bit is set. | ||
2077 | */ | ||
2078 | #define HV_PTE_EXECUTABLE (__HV_PTE_ONE << HV_PTE_INDEX_EXECUTABLE) | ||
2079 | |||
2080 | /** The width of a LOTAR's x or y bitfield. */ | ||
2081 | #define HV_LOTAR_WIDTH 11 | ||
2082 | |||
2083 | /** Converts an x,y pair to a LOTAR value. */ | ||
2084 | #define HV_XY_TO_LOTAR(x, y) ((HV_LOTAR)(((x) << HV_LOTAR_WIDTH) | (y))) | ||
2085 | |||
2086 | /** Extracts the X component of a lotar. */ | ||
2087 | #define HV_LOTAR_X(lotar) ((lotar) >> HV_LOTAR_WIDTH) | ||
2088 | |||
2089 | /** Extracts the Y component of a lotar. */ | ||
2090 | #define HV_LOTAR_Y(lotar) ((lotar) & ((1 << HV_LOTAR_WIDTH) - 1)) | ||
2091 | |||
2092 | #ifndef __ASSEMBLER__ | ||
2093 | |||
2094 | /** Define accessor functions for a PTE bit. */ | ||
2095 | #define _HV_BIT(name, bit) \ | ||
2096 | static __inline int \ | ||
2097 | hv_pte_get_##name(HV_PTE pte) \ | ||
2098 | { \ | ||
2099 | return (pte.val >> HV_PTE_INDEX_##bit) & 1; \ | ||
2100 | } \ | ||
2101 | \ | ||
2102 | static __inline HV_PTE \ | ||
2103 | hv_pte_set_##name(HV_PTE pte) \ | ||
2104 | { \ | ||
2105 | pte.val |= 1ULL << HV_PTE_INDEX_##bit; \ | ||
2106 | return pte; \ | ||
2107 | } \ | ||
2108 | \ | ||
2109 | static __inline HV_PTE \ | ||
2110 | hv_pte_clear_##name(HV_PTE pte) \ | ||
2111 | { \ | ||
2112 | pte.val &= ~(1ULL << HV_PTE_INDEX_##bit); \ | ||
2113 | return pte; \ | ||
2114 | } | ||
2115 | |||
2116 | /* Generate accessors to get, set, and clear various PTE flags. | ||
2117 | */ | ||
2118 | _HV_BIT(present, PRESENT) | ||
2119 | _HV_BIT(page, PAGE) | ||
2120 | _HV_BIT(client0, CLIENT0) | ||
2121 | _HV_BIT(client1, CLIENT1) | ||
2122 | _HV_BIT(migrating, MIGRATING) | ||
2123 | _HV_BIT(nc, NC) | ||
2124 | _HV_BIT(readable, READABLE) | ||
2125 | _HV_BIT(writable, WRITABLE) | ||
2126 | _HV_BIT(executable, EXECUTABLE) | ||
2127 | _HV_BIT(accessed, ACCESSED) | ||
2128 | _HV_BIT(dirty, DIRTY) | ||
2129 | _HV_BIT(no_alloc_l1, NO_ALLOC_L1) | ||
2130 | _HV_BIT(no_alloc_l2, NO_ALLOC_L2) | ||
2131 | _HV_BIT(cached_priority, CACHED_PRIORITY) | ||
2132 | _HV_BIT(global, GLOBAL) | ||
2133 | _HV_BIT(user, USER) | ||
2134 | |||
2135 | #undef _HV_BIT | ||
2136 | |||
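The generated accessors are pure functions that return a modified copy of the PTE, so they chain naturally; a minimal sketch:

```c
#include <hv/hypervisor.h>

/* Build a present, user-accessible, read/write PTE from scratch using the
 * accessors generated by _HV_BIT above. */
static HV_PTE make_user_data_pte(void)
{
	HV_PTE pte = hv_pte(0);          /* start from an all-zero PTE */

	pte = hv_pte_set_present(pte);
	pte = hv_pte_set_user(pte);
	pte = hv_pte_set_readable(pte);
	pte = hv_pte_set_writable(pte);
	return pte;
}
```

A real client would also pick a caching mode and a PFN, as sketched after the mode and PFN accessors below.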
2137 | /** Get the page mode from the PTE. | ||
2138 | * | ||
2139 | * This field generally determines whether and how accesses to the page | ||
2140 | * are cached; the HV_PTE_MODE_xxx symbols define the legal values for the | ||
2141 | * page mode. The NC, NO_ALLOC_L1, and NO_ALLOC_L2 bits modify this | ||
2142 | * general policy. | ||
2143 | */ | ||
2144 | static __inline unsigned int | ||
2145 | hv_pte_get_mode(const HV_PTE pte) | ||
2146 | { | ||
2147 | return (((__hv32) pte.val) >> HV_PTE_INDEX_MODE) & | ||
2148 | ((1 << HV_PTE_MODE_BITS) - 1); | ||
2149 | } | ||
2150 | |||
2151 | /** Set the page mode into a PTE. See hv_pte_get_mode. */ | ||
2152 | static __inline HV_PTE | ||
2153 | hv_pte_set_mode(HV_PTE pte, unsigned int val) | ||
2154 | { | ||
2155 | pte.val &= ~(((1ULL << HV_PTE_MODE_BITS) - 1) << HV_PTE_INDEX_MODE); | ||
2156 | pte.val |= val << HV_PTE_INDEX_MODE; | ||
2157 | return pte; | ||
2158 | } | ||
2159 | |||
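A small sketch of how a client might pair hv_pte_set_mode() with the HV_PTE_MODE_xxx values and the chip-feature macros from <arch/chip.h>; the choice of fallback mode here is illustrative, not prescriptive.

```c
#include <arch/chip.h>
#include <hv/hypervisor.h>

/* Prefer hash-for-home L3 caching where the chip supports it; otherwise
 * fall back to local caching with no remote L3. */
static HV_PTE choose_cache_mode(HV_PTE pte)
{
#if CHIP_HAS_CBOX_HOME_MAP()
	return hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
#else
	return hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
#endif
}
```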
2160 | /** Get the page frame number from the PTE. | ||
2161 | * | ||
2162 | * This field contains the upper bits of the CPA (client physical | ||
2163 | * address) of the target page; the complete CPA is this field with | ||
2164 | * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it. | ||
2165 | * | ||
2166 | * For PTEs in a level-1 page table where the Page bit is set, the | ||
2167 | * CPA must be aligned modulo the large page size. | ||
2168 | */ | ||
2169 | static __inline unsigned int | ||
2170 | hv_pte_get_pfn(const HV_PTE pte) | ||
2171 | { | ||
2172 | return pte.val >> HV_PTE_INDEX_PFN; | ||
2173 | } | ||
2174 | |||
2175 | |||
2176 | /** Set the page frame number into a PTE. See hv_pte_get_pfn. */ | ||
2177 | static __inline HV_PTE | ||
2178 | hv_pte_set_pfn(HV_PTE pte, unsigned int val) | ||
2179 | { | ||
2180 | /* | ||
2181 | * Note that the use of "PTFN" in the next line is intentional; we | ||
2182 | * don't want any garbage lower bits left in that field. | ||
2183 | */ | ||
2184 | pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN); | ||
2185 | pte.val |= (__hv64) val << HV_PTE_INDEX_PFN; | ||
2186 | return pte; | ||
2187 | } | ||
2188 | |||
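Tying hv_pte_set_pfn() to the HV_CPA_TO_PFN() conversion defined later in this header, a minimal sketch, assuming 'cpa' is already small-page aligned:

```c
#include <hv/hypervisor.h>

/* Point a PTE at the small page whose base client physical address is
 * 'cpa'; HV_CPA_TO_PFN() simply drops the low HV_LOG2_PAGE_SIZE_SMALL
 * bits. */
static HV_PTE point_pte_at(HV_PTE pte, HV_PhysAddr cpa)
{
	return hv_pte_set_pfn(pte, HV_CPA_TO_PFN(cpa));
}
```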
2189 | /** Get the page table frame number from the PTE. | ||
2190 | * | ||
2191 | * This field contains the upper bits of the CPA (client physical | ||
2192 | * address) of the target page table; the complete CPA is this field with | ||
2193 | * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it. | ||
2194 | * | ||
2195 | * For PTEs in a level-1 page table when the Page bit is not set, the | ||
2196 | * CPA must be aligned modulo the stricter of HV_PAGE_TABLE_ALIGN and | ||
2197 | * the level-2 page table size. | ||
2198 | */ | ||
2199 | static __inline unsigned long | ||
2200 | hv_pte_get_ptfn(const HV_PTE pte) | ||
2201 | { | ||
2202 | return pte.val >> HV_PTE_INDEX_PTFN; | ||
2203 | } | ||
2204 | |||
2205 | |||
2206 | /** Set the page table frame number into a PTE. See hv_pte_get_ptfn. */ | ||
2207 | static __inline HV_PTE | ||
2208 | hv_pte_set_ptfn(HV_PTE pte, unsigned long val) | ||
2209 | { | ||
2210 | pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS)-1) << HV_PTE_INDEX_PTFN); | ||
2211 | pte.val |= (__hv64) val << HV_PTE_INDEX_PTFN; | ||
2212 | return pte; | ||
2213 | } | ||
2214 | |||
2215 | |||
2216 | /** Get the remote tile caching this page. | ||
2217 | * | ||
2218 | * Specifies the remote tile which is providing the L3 cache for this page. | ||
2219 | * | ||
2220 | * This field is ignored unless the page mode is HV_PTE_MODE_CACHE_TILE_L3. | ||
2221 | * | ||
2222 | * In level-1 PTEs, if the Page bit is clear, this field determines how the | ||
2223 | * level-2 page table is accessed. | ||
2224 | */ | ||
2225 | static __inline unsigned int | ||
2226 | hv_pte_get_lotar(const HV_PTE pte) | ||
2227 | { | ||
2228 | unsigned int lotar = ((__hv32) pte.val) >> HV_PTE_INDEX_LOTAR; | ||
2229 | |||
2230 | return HV_XY_TO_LOTAR( (lotar >> (HV_PTE_LOTAR_BITS / 2)), | ||
2231 | (lotar & ((1 << (HV_PTE_LOTAR_BITS / 2)) - 1)) ); | ||
2232 | } | ||
2233 | |||
2234 | |||
2235 | /** Set the remote tile caching a page into a PTE. See hv_pte_get_lotar. */ | ||
2236 | static __inline HV_PTE | ||
2237 | hv_pte_set_lotar(HV_PTE pte, unsigned int val) | ||
2238 | { | ||
2239 | unsigned int x = HV_LOTAR_X(val); | ||
2240 | unsigned int y = HV_LOTAR_Y(val); | ||
2241 | |||
2242 | pte.val &= ~(((1ULL << HV_PTE_LOTAR_BITS)-1) << HV_PTE_INDEX_LOTAR); | ||
2243 | pte.val |= (x << (HV_PTE_INDEX_LOTAR + HV_PTE_LOTAR_BITS / 2)) | | ||
2244 | (y << HV_PTE_INDEX_LOTAR); | ||
2245 | return pte; | ||
2246 | } | ||
2247 | |||
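A short sketch combining HV_XY_TO_LOTAR() with hv_pte_set_lotar(); the coordinates are supervisor-relative, and since the LOTAR field is ignored unless the page mode selects a remote home, the mode is set alongside it.

```c
#include <hv/hypervisor.h>

/* Home a page's L3 cache on the tile at supervisor-relative (x, y). */
static HV_PTE home_page_on_tile(HV_PTE pte, unsigned int x, unsigned int y)
{
	pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
	pte = hv_pte_set_lotar(pte, HV_XY_TO_LOTAR(x, y));
	return pte;
}
```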
2248 | #endif /* !__ASSEMBLER__ */ | ||
2249 | |||
2250 | /** Converts a client physical address to a pfn. */ | ||
2251 | #define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL) | ||
2252 | |||
2253 | /** Converts a pfn to a client physical address. */ | ||
2254 | #define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL) | ||
2255 | |||
2256 | /** Converts a client physical address to a ptfn. */ | ||
2257 | #define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN) | ||
2258 | |||
2259 | /** Converts a ptfn to a client physical address. */ | ||
2260 | #define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN) | ||
2261 | |||
2262 | /** Converts a ptfn to a pfn. */ | ||
2263 | #define HV_PTFN_TO_PFN(p) \ | ||
2264 | ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN)) | ||
2265 | |||
2266 | /** Converts a pfn to a ptfn. */ | ||
2267 | #define HV_PFN_TO_PTFN(p) \ | ||
2268 | ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN)) | ||
2269 | |||
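These conversions compose in the obvious way; for example, rounding a CPA down to its small-page base is just CPA to PFN and back, as in this minimal sketch:

```c
#include <hv/hypervisor.h>

/* Round a client physical address down to the base of its small page:
 * CPA -> PFN drops the low HV_LOG2_PAGE_SIZE_SMALL bits, and PFN -> CPA
 * restores them as zeroes. */
static HV_PhysAddr small_page_base(HV_PhysAddr cpa)
{
	return HV_PFN_TO_CPA(HV_CPA_TO_PFN(cpa));
}
```

Likewise, HV_PFN_TO_PTFN() followed by HV_PTFN_TO_PFN() is the identity, since the two macros just shift by the difference of the log2 alignments.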
2270 | #if CHIP_VA_WIDTH() > 32 | ||
2271 | |||
2272 | /** Log number of HV_PTE entries in L0 page table */ | ||
2273 | #define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN) | ||
2274 | |||
2275 | /** Number of HV_PTE entries in L0 page table */ | ||
2276 | #define HV_L0_ENTRIES (1 << HV_LOG2_L0_ENTRIES) | ||
2277 | |||
2278 | /** Log size of L0 page table in bytes */ | ||
2279 | #define HV_LOG2_L0_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L0_ENTRIES) | ||
2280 | |||
2281 | /** Size of L0 page table in bytes */ | ||
2282 | #define HV_L0_SIZE (1 << HV_LOG2_L0_SIZE) | ||
2283 | |||
2284 | #ifdef __ASSEMBLER__ | ||
2285 | |||
2286 | /** Index in L0 for a specific VA */ | ||
2287 | #define HV_L0_INDEX(va) \ | ||
2288 | (((va) >> HV_LOG2_L1_SPAN) & (HV_L0_ENTRIES - 1)) | ||
2289 | |||
2290 | #else | ||
2291 | |||
2292 | /** Index in L0 for a specific VA */ | ||
2293 | #define HV_L0_INDEX(va) \ | ||
2294 | (((HV_VirtAddr)(va) >> HV_LOG2_L1_SPAN) & (HV_L0_ENTRIES - 1)) | ||
2295 | |||
2296 | #endif | ||
2297 | |||
2298 | #endif /* CHIP_VA_WIDTH() > 32 */ | ||
2299 | |||
2300 | /** Log number of HV_PTE entries in L1 page table */ | ||
2301 | #define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE) | ||
2302 | |||
2303 | /** Number of HV_PTE entries in L1 page table */ | ||
2304 | #define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES) | ||
2305 | |||
2306 | /** Log size of L1 page table in bytes */ | ||
2307 | #define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES) | ||
2308 | |||
2309 | /** Size of L1 page table in bytes */ | ||
2310 | #define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE) | ||
2311 | |||
2312 | /** Log number of HV_PTE entries in level-2 page table */ | ||
2313 | #define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL) | ||
2314 | |||
2315 | /** Number of HV_PTE entries in level-2 page table */ | ||
2316 | #define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES) | ||
2317 | |||
2318 | /** Log size of level-2 page table in bytes */ | ||
2319 | #define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES) | ||
2320 | |||
2321 | /** Size of level-2 page table in bytes */ | ||
2322 | #define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE) | ||
2323 | |||
2324 | #ifdef __ASSEMBLER__ | ||
2325 | |||
2326 | #if CHIP_VA_WIDTH() > 32 | ||
2327 | |||
2328 | /** Index in L1 for a specific VA */ | ||
2329 | #define HV_L1_INDEX(va) \ | ||
2330 | (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1)) | ||
2331 | |||
2332 | #else /* CHIP_VA_WIDTH() > 32 */ | ||
2333 | |||
2334 | /** Index in L1 for a specific VA */ | ||
2335 | #define HV_L1_INDEX(va) \ | ||
2336 | (((va) >> HV_LOG2_PAGE_SIZE_LARGE)) | ||
2337 | |||
2338 | #endif /* CHIP_VA_WIDTH() > 32 */ | ||
2339 | |||
2340 | /** Index in level-2 page table for a specific VA */ | ||
2341 | #define HV_L2_INDEX(va) \ | ||
2342 | (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1)) | ||
2343 | |||
2344 | #else /* __ASSEMBLER__ */ | ||
2345 | |||
2346 | #if CHIP_VA_WIDTH() > 32 | ||
2347 | |||
2348 | /** Index in L1 for a specific VA */ | ||
2349 | #define HV_L1_INDEX(va) \ | ||
2350 | (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1)) | ||
2351 | |||
2352 | #else /* CHIP_VA_WIDTH() > 32 */ | ||
2353 | |||
2354 | /** Index in L1 for a specific VA */ | ||
2355 | #define HV_L1_INDEX(va) \ | ||
2356 | (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE)) | ||
2357 | |||
2358 | #endif /* CHIP_VA_WIDTH() > 32 */ | ||
2359 | |||
2360 | /** Index in level-2 page table for a specific VA */ | ||
2361 | #define HV_L2_INDEX(va) \ | ||
2362 | (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1)) | ||
2363 | |||
2364 | #endif /* __ASSEMBLER__ */ | ||
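To show how the non-assembler index macros fit together, here is a hedged sketch of a two-level lookup for the CHIP_VA_WIDTH() <= 32 case (chips with a wider VA add an L0 step using HV_L0_INDEX() first). cpa_to_va() is a hypothetical client helper for turning a table's CPA into a dereferenceable pointer; it is not part of this API.

```c
#include <hv/hypervisor.h>

/* Hypothetical client helper: map a client physical address to a pointer
 * the client can dereference. */
extern void *cpa_to_va(HV_PhysAddr cpa);

/* Return the level-2 PTE covering 'va', or an all-zero PTE if the level-1
 * entry is absent or already maps a large page itself. */
static HV_PTE lookup_small_page(HV_PhysAddr l1_table_cpa, HV_VirtAddr va)
{
	HV_PTE *l1 = cpa_to_va(l1_table_cpa);
	HV_PTE *l2;
	HV_PTE l1e = l1[HV_L1_INDEX(va)];

	if (!hv_pte_get_present(l1e) || hv_pte_get_page(l1e))
		return hv_pte(0);

	/* The level-1 entry's PTFN is the CPA of the level-2 table. */
	l2 = cpa_to_va(HV_PTFN_TO_CPA(hv_pte_get_ptfn(l1e)));
	return l2[HV_L2_INDEX(va)];
}
```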
2365 | |||
2366 | #endif /* _TILE_HV_H */ | ||
diff --git a/arch/tile/include/hv/syscall_public.h b/arch/tile/include/hv/syscall_public.h new file mode 100644 index 000000000000..9cc0837e69fd --- /dev/null +++ b/arch/tile/include/hv/syscall_public.h | |||
@@ -0,0 +1,42 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation, version 2. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
11 | * NON INFRINGEMENT. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | /** | ||
16 | * @file syscall_public.h | ||
17 | * Indices for the hypervisor system calls that are intended to be called | ||
18 | * directly, rather than only through hypervisor-generated "glue" code. | ||
19 | */ | ||
20 | |||
21 | #ifndef _SYS_HV_INCLUDE_SYSCALL_PUBLIC_H | ||
22 | #define _SYS_HV_INCLUDE_SYSCALL_PUBLIC_H | ||
23 | |||
24 | /** Fast syscall flag bit location. When this bit is set, the hypervisor | ||
25 | * handles the syscall specially. | ||
26 | */ | ||
27 | #define HV_SYS_FAST_SHIFT 14 | ||
28 | |||
29 | /** Fast syscall flag bit mask. */ | ||
30 | #define HV_SYS_FAST_MASK (1 << HV_SYS_FAST_SHIFT) | ||
31 | |||
32 | /** Bit location for flagging fast syscalls that can be called from PL0. */ | ||
33 | #define HV_SYS_FAST_PL0_SHIFT 13 | ||
34 | |||
35 | /** Fast syscall allowing PL0 bit mask. */ | ||
36 | #define HV_SYS_FAST_PL0_MASK (1 << HV_SYS_FAST_PL0_SHIFT) | ||
37 | |||
38 | /** Perform an MF that waits for all victims to reach DRAM. */ | ||
39 | #define HV_SYS_fence_incoherent (51 | HV_SYS_FAST_MASK \ | ||
40 | | HV_SYS_FAST_PL0_MASK) | ||
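A tiny sketch of how these flag bits compose with a syscall index; it is pure arithmetic on the constants above (assuming the usual hv/ include path) and performs no hypervisor call.

```c
#include <hv/syscall_public.h>

/* Strip the fast-syscall and PL0 flag bits to recover the raw index;
 * for HV_SYS_fence_incoherent this yields 51. */
static int hv_syscall_index(int sysnum)
{
	return sysnum & ~(HV_SYS_FAST_MASK | HV_SYS_FAST_PL0_MASK);
}
```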
41 | |||
42 | #endif /* !_SYS_HV_INCLUDE_SYSCALL_PUBLIC_H */ | ||