diff options
author | David S. Miller <davem@davemloft.net> | 2006-02-04 06:01:45 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2006-03-20 04:11:37 -0500 |
commit | 766f861fbbd968a1850295ed6dec4504b4500dcc (patch) | |
tree | 76729285f448b58c812469b1bddf64f92e9f8d6e /include/asm-sparc64/hypervisor.h | |
parent | 314ef6859750b6539eac48d78059bb7986f29cb1 (diff) |
[SPARC64]: SUN4V hypervisor interface defines.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/asm-sparc64/hypervisor.h')
-rw-r--r-- | include/asm-sparc64/hypervisor.h | 2072 |
1 files changed, 2072 insertions, 0 deletions
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h new file mode 100644 index 000000000000..9c8e453abe97 --- /dev/null +++ b/include/asm-sparc64/hypervisor.h | |||
@@ -0,0 +1,2072 @@ | |||
1 | #ifndef _SPARC64_HYPERVISOR_H | ||
2 | #define _SPARC64_HYPERVISOR_H | ||
3 | |||
4 | /* Sun4v hypervisor interfaces and defines. | ||
5 | * | ||
6 | * Hypervisor calls are made via traps to software traps number 0x80 | ||
7 | * and above. Registers %o0 to %o5 serve as argument, status, and | ||
8 | * return value registers. | ||
9 | * | ||
10 | * There are two kinds of these traps. First there are the normal | ||
11 | * "fast traps" which use software trap 0x80 and encode the function | ||
12 | * to invoke by number in register %o5. Argument and return value | ||
13 | * handling is as follows: | ||
14 | * | ||
15 | * ----------------------------------------------- | ||
16 | * | %o5 | function number | undefined | | ||
17 | * | %o0 | argument 0 | return status | | ||
18 | * | %o1 | argument 1 | return value 1 | | ||
19 | * | %o2 | argument 2 | return value 2 | | ||
20 | * | %o3 | argument 3 | return value 3 | | ||
21 | * | %o4 | argument 4 | return value 4 | | ||
22 | * ----------------------------------------------- | ||
23 | * | ||
24 | * The second type are "hyper-fast traps" which encode the function | ||
25 | * number in the software trap number itself. So these use trap | ||
26 | * numbers > 0x80. The register usage for hyper-fast traps is as | ||
27 | * follows: | ||
28 | * | ||
29 | * ----------------------------------------------- | ||
30 | * | %o0 | argument 0 | return status | | ||
31 | * | %o1 | argument 1 | return value 1 | | ||
32 | * | %o2 | argument 2 | return value 2 | | ||
33 | * | %o3 | argument 3 | return value 3 | | ||
34 | * | %o4 | argument 4 | return value 4 | | ||
35 | * ----------------------------------------------- | ||
36 | * | ||
37 | * Registers providing explicit arguments to the hypervisor calls | ||
38 | * are volatile across the call. Upon return their values are | ||
39 | * undefined unless explicitly specified as containing a particular | ||
40 | * return value by the specific call. The return status is always | ||
41 | * returned in register %o0, zero indicates a successful execution of | ||
42 | * the hypervisor call and other values indicate an error status as | ||
43 | * defined below. So, for example, if a hyper-fast trap takes | ||
44 | * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across | ||
45 | * the call and %o3, %o4, and %o5 would be preserved. | ||
46 | * | ||
47 | * If the hypervisor trap is invalid, or the fast trap function number | ||
48 | * is invalid, HV_EBADTRAP will be returned in %o0. Also, all 64-bits | ||
49 | * of the argument and return values are significant. | ||
50 | */ | ||
51 | |||
52 | /* Trap numbers. */ | ||
53 | #define HV_FAST_TRAP 0x80 | ||
54 | #define HV_MMU_MAP_ADDR_TRAP 0x83 | ||
55 | #define HV_MMU_UNMAP_ADDR_TRAP 0x84 | ||
56 | #define HV_TTRACE_ADDENTRY_TRAP 0x85 | ||
57 | #define HV_CORE_TRAP 0xff | ||
58 | |||
59 | /* Error codes. */ | ||
60 | #define HV_EOK 0 /* Successful return */ | ||
61 | #define HV_ENOCPU 1 /* Invalid CPU id */ | ||
62 | #define HV_ENORADDR 2 /* Invalid real address */ | ||
63 | #define HV_ENOINTR 3 /* Invalid interrupt id */ | ||
64 | #define HV_EBADPGSZ 4 /* Invalid pagesize encoding */ | ||
65 | #define HV_EBADTSB 5 /* Invalid TSB description */ | ||
66 | #define HV_EINVAL 6 /* Invalid argument */ | ||
67 | #define HV_EBADTRAP 7 /* Invalid function number */ | ||
68 | #define HV_EBADALIGN 8 /* Invalid address alignment */ | ||
69 | #define HV_EWOULDBLOCK 9 /* Cannot complete w/o blocking */ | ||
70 | #define HV_ENOACCESS 10 /* No access to resource */ | ||
71 | #define HV_EIO 11 /* I/O error */ | ||
72 | #define HV_ECPUERROR 12 /* CPU in error state */ | ||
73 | #define HV_ENOTSUPPORTED 13 /* Function not supported */ | ||
74 | #define HV_ENOMAP 14 /* No mapping found */ | ||
75 | #define HV_ETOOMANY 15 /* Too many items specified */ | ||
76 | |||
77 | /* mach_exit() | ||
78 | * TRAP: HV_FAST_TRAP | ||
79 | * FUNCTION: HV_FAST_MACH_EXIT | ||
80 | * ARG0: exit code | ||
81 | * ERRORS: This service does not return. | ||
82 | * | ||
83 | * Stop all CPUs in the virtual domain and place them into the stopped | ||
84 | * state. The 64-bit exit code may be passed to a service entity as | ||
85 | * the domain's exit status. On systems without a service entity, the | ||
86 | * domain will undergo a reset, and the boot firmware will be | ||
87 | * reloaded. | ||
88 | * | ||
89 | * This function will never return to the guest that invokes it. | ||
90 | * | ||
91 | * Note: By convention an exit code of zero denotes a successful exit by | ||
92 | * the guest code. A non-zero exit code denotes a guest specific | ||
93 | * error indication. | ||
94 | * | ||
95 | */ | ||
96 | #define HV_FAST_MACH_EXIT 0x00 | ||
97 | |||
98 | /* Domain services. */ | ||
99 | |||
100 | /* mach_desc() | ||
101 | * TRAP: HV_FAST_TRAP | ||
102 | * FUNCTION: HV_FAST_MACH_DESC | ||
103 | * ARG0: buffer | ||
104 | * ARG1: length | ||
105 | * RET0: status | ||
106 | * RET1: length | ||
107 | * ERRORS: HV_EBADALIGN Buffer is badly aligned | ||
108 | * HV_ENORADDR Buffer points to an illegal real address. | ||
109 | * HV_EINVAL Buffer length is too small for complete | ||
110 | * machine description. | ||
111 | * | ||
112 | * Copy the most current machine description into the buffer indicated | ||
113 | * by the real address in ARG0. The buffer provided must be 16 byte | ||
114 | * aligned. Upon success or HV_EINVAL, this service returns the | ||
115 | * actual size of the machine description in the RET1 return value. | ||
116 | * | ||
117 | * Note: A method of determining the appropriate buffer size for the | ||
118 | * machine description is to first call this service with a buffer | ||
119 | * length of 0 bytes. | ||
120 | */ | ||
121 | #define HV_FAST_MACH_DESC 0x01 | ||
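The calling convention above maps naturally onto a small C wrapper. The sketch below is a hypothetical illustration and is not part of this patch: it assumes GCC-style register variables and inline assembly on sparc64, and shows the %o5 function number, the "ta HV_FAST_TRAP" trap, and the status/RET1 registers for mach_desc(). Per the note above, calling it first with a length of 0 is one way to discover the required buffer size.

#ifndef __ASSEMBLY__
/* Hypothetical sketch only: fast-trap invocation of mach_desc(). */
static inline unsigned long hv_mach_desc_sketch(unsigned long buf_ra,
						unsigned long buf_len,
						unsigned long *real_len)
{
	/* Function number in %o5, ARG0/ARG1 in %o0/%o1, per the table above. */
	register unsigned long func asm("o5") = HV_FAST_MACH_DESC;
	register unsigned long arg0 asm("o0") = buf_ra;
	register unsigned long arg1 asm("o1") = buf_len;

	__asm__ __volatile__("ta	%[trap]"
			     : "=r" (arg0), "=r" (arg1), "=r" (func)
			     : [trap] "i" (HV_FAST_TRAP),
			       "0" (arg0), "1" (arg1), "2" (func)
			     : "o2", "o3", "o4", "memory");

	*real_len = arg1;	/* RET1: actual machine description size */
	return arg0;		/* RET0: status, HV_EOK on success       */
}
#endif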
122 | |||
123 | /* mach_sir() | ||
124 | * TRAP: HV_FAST_TRAP | ||
125 | * FUNCTION: HV_FAST_MACH_SIR | ||
126 | * ERRORS: This service does not return. | ||
127 | * | ||
128 | * Perform a software initiated reset of the virtual machine domain. | ||
129 | * All CPUs are captured as soon as possible, all hardware devices are | ||
130 | * returned to the entry default state, and the domain is restarted at | ||
131 | * the SIR (trap type 0x04) real trap table (RTBA) entry point on one | ||
132 | * of the CPUs. The single CPU restarted is selected as determined by | ||
133 | * platform specific policy. Memory is preserved across this | ||
134 | * operation. | ||
135 | */ | ||
136 | #define HV_FAST_MACH_SIR 0x02 | ||
137 | |||
138 | /* mach_set_soft_state() | ||
139 | * TRAP: HV_FAST_TRAP | ||
140 | * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE | ||
141 | * ARG0: software state | ||
142 | * ARG1: software state description pointer | ||
143 | * RET0: status | ||
144 | * ERRORS: EINVAL software state not valid or software state | ||
145 | * description is not NULL terminated | ||
146 | * ENORADDR software state description pointer is not a | ||
147 | * valid real address | ||
148 | * EBADALIGN software state description is not correctly | ||
149 | * aligned | ||
150 | * | ||
151 | * This allows the guest to report its soft state to the hypervisor. There | ||
152 | * are two primary components to this state. The first part states whether | ||
153 | * the guest software is running or not. The second contains optional | ||
154 | * details specific to the software. | ||
155 | * | ||
156 | * The software state argument is defined below in HV_SOFT_STATE_*, and | ||
157 | * indicates whether the guest is operating normally or in a transitional | ||
158 | * state. | ||
159 | * | ||
160 | * The software state description argument is a real address of a data buffer | ||
161 | * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL | ||
162 | * terminated 7-bit ASCII string of up to 31 characters not including the | ||
163 | * NULL termination. | ||
164 | */ | ||
165 | #define HV_FAST_MACH_SET_SOFT_STATE 0x03 | ||
166 | #define HV_SOFT_STATE_NORMAL 0x01 | ||
167 | #define HV_SOFT_STATE_TRANSITION 0x02 | ||
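A minimal sketch of the soft-state description buffer described above, assuming a hypothetical sun4v_mach_set_soft_state() wrapper that is not defined in this patch: the buffer is 32 bytes, 32-byte aligned, and holds a NUL-terminated 7-bit ASCII string of at most 31 characters.

#ifndef __ASSEMBLY__
/* Hypothetical sketch: the wrapper below is assumed, not part of this patch. */
extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
					       unsigned long desc_ra);

/* 32-byte, 32-byte aligned, NUL-terminated ASCII description (<= 31 chars). */
static char hv_soft_state_desc[32] __attribute__((aligned(32))) = "booting";

static inline unsigned long report_transition_sketch(unsigned long desc_ra)
{
	/* desc_ra is the real address backing hv_soft_state_desc. */
	return sun4v_mach_set_soft_state(HV_SOFT_STATE_TRANSITION, desc_ra);
}
#endif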
168 | |||
169 | /* mach_get_soft_state() | ||
170 | * TRAP: HV_FAST_TRAP | ||
171 | * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE | ||
172 | * ARG0: software state description pointer | ||
173 | * RET0: status | ||
174 | * RET1: software state | ||
175 | * ERRORS: ENORADDR software state description pointer is not a | ||
176 | * valid real address | ||
177 | * EBADALIGN software state description is not correctly | ||
178 | * aligned | ||
179 | * | ||
180 | * Retrieve the current value of the guest's software state. The rules | ||
181 | * for the software state pointer are the same as for mach_set_soft_state() | ||
182 | * above. | ||
183 | */ | ||
184 | #define HV_FAST_MACH_GET_SOFT_STATE 0x04 | ||
185 | |||
186 | /* CPU services. | ||
187 | * | ||
188 | * CPUs represent devices that can execute software threads. A single | ||
189 | * chip that contains multiple cores or strands is represented as | ||
190 | * multiple CPUs with unique CPU identifiers. CPUs are exported to | ||
191 | * OBP via the machine description (and to the OS via the OBP device | ||
192 | * tree). CPUs are always in one of three states: stopped, running, | ||
193 | * or error. | ||
194 | * | ||
195 | * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a | ||
196 | * CPU within a logical domain. Operations that are to be performed | ||
197 | * on multiple CPUs specify them via a CPU list. A CPU list is an | ||
198 | * array in real memory, of which each 16-bit word is a CPU ID. CPU | ||
199 | * lists are passed through the API as two arguments. The first is | ||
200 | * the number of entries (16-bit words) in the CPU list, and the | ||
201 | * second is the (real address) pointer to the CPU ID list. | ||
202 | */ | ||
203 | |||
204 | /* cpu_start() | ||
205 | * TRAP: HV_FAST_TRAP | ||
206 | * FUNCTION: HV_FAST_CPU_START | ||
207 | * ARG0: CPU ID | ||
208 | * ARG1: PC | ||
209 | * ARG2: RTBA | ||
210 | * ARG3: target ARG0 | ||
211 | * RET0: status | ||
212 | * ERRORS: ENOCPU Invalid CPU ID | ||
213 | * EINVAL Target CPU ID is not in the stopped state | ||
214 | * ENORADDR Invalid PC or RTBA real address | ||
215 | * EBADALIGN Unaligned PC or unaligned RTBA | ||
216 | * EWOULDBLOCK Starting resources are not available | ||
217 | * | ||
218 | * Start CPU with given CPU ID with PC in %pc and with a real trap | ||
219 | * base address value of RTBA. The indicated CPU must be in the | ||
220 | * stopped state. The supplied RTBA must be aligned on a 256 byte | ||
221 | * boundary. On successful completion, the specified CPU will be in | ||
222 | * the running state and will be supplied with "target ARG0" in %o0 | ||
223 | * and RTBA in %tba. | ||
224 | */ | ||
225 | #define HV_FAST_CPU_START 0x10 | ||
226 | |||
227 | /* cpu_stop() | ||
228 | * TRAP: HV_FAST_TRAP | ||
229 | * FUNCTION: HV_FAST_CPU_STOP | ||
230 | * ARG0: CPU ID | ||
231 | * RET0: status | ||
232 | * ERRORS: ENOCPU Invalid CPU ID | ||
233 | * EINVAL Target CPU ID is the current cpu | ||
234 | * EINVAL Target CPU ID is not in the running state | ||
235 | * EWOULDBLOCK Stopping resources are not available | ||
236 | * ENOTSUPPORTED Not supported on this platform | ||
237 | * | ||
238 | * The specified CPU is stopped. The indicated CPU must be in the | ||
239 | * running state. On completion, it will be in the stopped state. It | ||
240 | * is not legal to stop the current CPU. | ||
241 | * | ||
242 | * Note: As this service cannot be used to stop the current cpu, this service | ||
243 | * may not be used to stop the last running CPU in a domain. To stop | ||
244 | * and exit a running domain, a guest must use the mach_exit() service. | ||
245 | */ | ||
246 | #define HV_FAST_CPU_STOP 0x11 | ||
247 | |||
248 | /* cpu_yield() | ||
249 | * TRAP: HV_FAST_TRAP | ||
250 | * FUNCTION: HV_FAST_CPU_YIELD | ||
251 | * RET0: status | ||
252 | * ERRORS: No possible error. | ||
253 | * | ||
254 | * Suspend execution on the current CPU. Execution will resume when | ||
255 | * an interrupt (device, %stick_compare, or cross-call) is targeted to | ||
256 | * the CPU. On some CPUs, this API may be used by the hypervisor to | ||
257 | * save power by disabling hardware strands. | ||
258 | */ | ||
259 | #define HV_FAST_CPU_YIELD 0x12 | ||
260 | |||
261 | |||
262 | /* cpu_qconf() | ||
263 | * TRAP: HV_FAST_TRAP | ||
264 | * FUNCTION: HV_FAST_CPU_QCONF | ||
265 | * ARG0: queue | ||
266 | * ARG1: base real address | ||
267 | * ARG2: number of entries | ||
268 | * RET0: status | ||
269 | * ERRORS: ENORADDR Invalid base real address | ||
270 | * EINVAL Invalid queue or number of entries is less | ||
271 | * than 2 or too large. | ||
272 | * EBADALIGN Base real address is not correctly aligned | ||
273 | * for size. | ||
274 | * | ||
275 | * Configure the given queue to be placed at the given base real | ||
276 | * address, with the given number of entries. The number of entries | ||
277 | * must be a power of 2. The base real address must be aligned | ||
278 | * exactly to match the queue size. Each queue entry is 64 bytes | ||
279 | * long, so for example a 32 entry queue must be aligned on a 2048 | ||
280 | * byte real address boundary. | ||
281 | * | ||
282 | * The specified queue is unconfigured if the number of entries is given as zero. | ||
283 | * | ||
284 | * For the current version of this API service, the argument queue is defined | ||
285 | * as follows: | ||
286 | * queue description | ||
287 | * ----- ------------------------- | ||
288 | * 0x3c cpu mondo queue | ||
289 | * 0x3d device mondo queue | ||
290 | * 0x3e resumable error queue | ||
291 | * 0x3f non-resumable error queue | ||
292 | * | ||
293 | * Note: The maximum number of entries for each queue for a specific cpu may | ||
294 | * be determined from the machine description. | ||
295 | */ | ||
296 | #define HV_FAST_CPU_QCONF 0x14 | ||
297 | #define HV_CPU_QUEUE_CPU_MONDO 0x3c | ||
298 | #define HV_CPU_QUEUE_DEVICE_MONDO 0x3d | ||
299 | #define HV_CPU_QUEUE_RES_ERROR 0x3e | ||
300 | #define HV_CPU_QUEUE_NONRES_ERROR 0x3f | ||
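The alignment rule above can be checked mechanically. A small sketch follows (the helper name is hypothetical, not part of this patch): a queue of N entries occupies N * 64 bytes and its base real address must be aligned to exactly that size, e.g. 32 entries require 2048-byte alignment.

#ifndef __ASSEMBLY__
/* Hypothetical sketch: validate a cpu_qconf() base address and entry count. */
static inline int hv_cpu_queue_base_ok(unsigned long base_ra,
				       unsigned long num_entries)
{
	unsigned long size = num_entries * 64UL;	/* 64 bytes per entry      */

	if (num_entries < 2 || (num_entries & (num_entries - 1)))
		return 0;				/* not a power of two >= 2 */
	return (base_ra & (size - 1)) == 0;		/* aligned to queue size   */
}
#endif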
301 | |||
302 | /* cpu_qinfo() | ||
303 | * TRAP: HV_FAST_TRAP | ||
304 | * FUNCTION: HV_FAST_CPU_QINFO | ||
305 | * ARG0: queue | ||
306 | * RET0: status | ||
307 | * RET1: base real address | ||
308 | * RET2: number of entries | ||
309 | * ERRORS: EINVAL Invalid queue | ||
310 | * | ||
311 | * Return the configuration info for the given queue. The base real | ||
312 | * address and number of entries of the defined queue are returned. | ||
313 | * The queue argument values are the same as for cpu_qconf() above. | ||
314 | * | ||
315 | * If the specified queue is a valid queue number, but no queue has | ||
316 | * been defined, the number of entries will be set to zero and the | ||
317 | * base real address returned is undefined. | ||
318 | */ | ||
319 | #define HV_FAST_CPU_QINFO 0x15 | ||
320 | |||
321 | /* cpu_mondo_send() | ||
322 | * TRAP: HV_FAST_TRAP | ||
323 | * FUNCTION: HV_FAST_CPU_MONDO_SEND | ||
324 | * ARG0-1: CPU list | ||
325 | * ARG2: data real address | ||
326 | * RET0: status | ||
327 | * ERRORS: EBADALIGN Mondo data is not 64-byte aligned or CPU list | ||
328 | * is not 2-byte aligned. | ||
329 | * ENORADDR Invalid data mondo address, or invalid cpu list | ||
330 | * address. | ||
331 | * ENOCPU Invalid cpu in CPU list | ||
332 | * EWOULDBLOCK Some or all of the listed CPUs did not receive | ||
333 | * the mondo | ||
334 | * EINVAL CPU list includes caller's CPU ID | ||
335 | * | ||
336 | * Send a mondo interrupt to the CPUs in the given CPU list with the | ||
337 | * 64-bytes at the given data real address. The data must be 64-byte | ||
338 | * aligned. The mondo data will be delivered to the cpu_mondo queues | ||
339 | * of the recipient CPUs. | ||
340 | * | ||
341 | * In all cases, error or not, the CPUs in the CPU list to which the | ||
342 | * mondo has been successfully delivered will be indicated by having | ||
343 | * their entry in the CPU list updated with the value 0xffff. | ||
344 | */ | ||
345 | #define HV_FAST_CPU_MONDO_SEND 0x42 | ||
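A sketch of the two argument formats described above, using a hypothetical sun4v_cpu_mondo_send() wrapper that is not part of this patch: the CPU list is an array of 16-bit CPU IDs passed as a count plus a real-address pointer, the mondo data is one 64-byte aligned 64-byte block, and delivered entries come back as 0xffff.

#ifndef __ASSEMBLY__
/* Hypothetical sketch: wrapper assumed, not defined by this patch. */
extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
					  unsigned long cpu_list_ra,
					  unsigned long mondo_data_ra);

static unsigned short mondo_cpu_list[2] = { 1, 2 };		/* 16-bit CPU IDs */
static unsigned long mondo_data[8] __attribute__((aligned(64)));/* 64 bytes      */

static inline int mondo_delivered_sketch(int idx)
{
	/* Entries are rewritten to 0xffff once the mondo is delivered. */
	return mondo_cpu_list[idx] == 0xffff;
}
#endif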
346 | |||
347 | /* cpu_myid() | ||
348 | * TRAP: HV_FAST_TRAP | ||
349 | * FUNCTION: HV_FAST_CPU_MYID | ||
350 | * RET0: status | ||
351 | * RET1: CPU ID | ||
352 | * ERRORS: No errors defined. | ||
353 | * | ||
354 | * Return the hypervisor ID handle for the current CPU. Used by a | ||
355 | * virtual CPU to discover its own identity. | ||
356 | */ | ||
357 | #define HV_FAST_CPU_MYID 0x16 | ||
358 | |||
359 | /* cpu_state() | ||
360 | * TRAP: HV_FAST_TRAP | ||
361 | * FUNCTION: HV_FAST_CPU_STATE | ||
362 | * ARG0: CPU ID | ||
363 | * RET0: status | ||
364 | * RET1: state | ||
365 | * ERRORS: ENOCPU Invalid CPU ID | ||
366 | * | ||
367 | * Retrieve the current state of the CPU with the given CPU ID. | ||
368 | */ | ||
369 | #define HV_FAST_CPU_STATE 0x17 | ||
370 | #define HV_CPU_STATE_STOPPED 0x01 | ||
371 | #define HV_CPU_STATE_RUNNING 0x02 | ||
372 | #define HV_CPU_STATE_ERROR 0x03 | ||
373 | |||
374 | /* cpu_set_rtba() | ||
375 | * TRAP: HV_FAST_TRAP | ||
376 | * FUNCTION: HV_FAST_CPU_SET_RTBA | ||
377 | * ARG0: RTBA | ||
378 | * RET0: status | ||
379 | * RET1: previous RTBA | ||
380 | * ERRORS: ENORADDR Invalid RTBA real address | ||
381 | * EBADALIGN RTBA is incorrectly aligned for a trap table | ||
382 | * | ||
383 | * Set the real trap base address of the local cpu to the given RTBA. | ||
384 | * The supplied RTBA must be aligned on a 256 byte boundary. Upon | ||
385 | * success the previous value of the RTBA is returned in RET1. | ||
386 | * | ||
387 | * Note: This service does not affect %tba | ||
388 | */ | ||
389 | #define HV_FAST_CPU_SET_RTBA 0x18 | ||
390 | |||
391 | /* cpu_get_rtba() | ||
392 | * TRAP: HV_FAST_TRAP | ||
393 | * FUNCTION: HV_FAST_CPU_GET_RTBA | ||
394 | * RET0: status | ||
395 | * RET1: current RTBA | ||
396 | * ERRORS: No possible error. | ||
397 | * | ||
398 | * Returns the current value of RTBA in RET1. | ||
399 | */ | ||
400 | #define HV_FAST_CPU_GET_RTBA 0x19 | ||
401 | |||
402 | /* MMU services. | ||
403 | * | ||
404 | * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls. | ||
405 | */ | ||
406 | #ifndef __ASSEMBLY__ | ||
407 | struct hv_tsb_descr { | ||
408 | unsigned short pgsz_idx; | ||
409 | unsigned short assoc; | ||
410 | unsigned int num_ttes; /* in TTEs */ | ||
411 | unsigned int ctx_idx; | ||
412 | unsigned int pgsz_mask; | ||
413 | unsigned long tsb_base; | ||
414 | unsigned long resv; | ||
415 | }; | ||
416 | #endif | ||
417 | #define HV_TSB_DESCR_PGSZ_IDX_OFFSET 0x00 | ||
418 | #define HV_TSB_DESCR_ASSOC_OFFSET 0x02 | ||
419 | #define HV_TSB_DESCR_NUM_TTES_OFFSET 0x04 | ||
420 | #define HV_TSB_DESCR_CTX_IDX_OFFSET 0x08 | ||
421 | #define HV_TSB_DESCR_PGSZ_MASK_OFFSET 0x0c | ||
422 | #define HV_TSB_DESCR_TSB_BASE_OFFSET 0x10 | ||
423 | #define HV_TSB_DESCR_RESV_OFFSET 0x18 | ||
424 | |||
425 | /* Page size bitmask. */ | ||
426 | #define HV_PGSZ_MASK_8K (1 << 0) | ||
427 | #define HV_PGSZ_MASK_64K (1 << 1) | ||
428 | #define HV_PGSZ_MASK_512K (1 << 2) | ||
429 | #define HV_PGSZ_MASK_4MB (1 << 3) | ||
430 | #define HV_PGSZ_MASK_32MB (1 << 4) | ||
431 | #define HV_PGSZ_MASK_256MB (1 << 5) | ||
432 | #define HV_PGSZ_MASK_2GB (1 << 6) | ||
433 | #define HV_PGSZ_MASK_16GB (1 << 7) | ||
434 | |||
435 | /* Page size index. The value given in the TSB descriptor must correspond | ||
436 | * to the smallest page size specified in the pgsz_mask page size bitmask. | ||
437 | */ | ||
438 | #define HV_PGSZ_IDX_8K 0 | ||
439 | #define HV_PGSZ_IDX_64K 1 | ||
440 | #define HV_PGSZ_IDX_512K 2 | ||
441 | #define HV_PGSZ_IDX_4MB 3 | ||
442 | #define HV_PGSZ_IDX_32MB 4 | ||
443 | #define HV_PGSZ_IDX_256MB 5 | ||
444 | #define HV_PGSZ_IDX_2GB 6 | ||
445 | #define HV_PGSZ_IDX_16GB 7 | ||
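A sketch of filling in one TSB description, using only definitions from this header: per the note above, pgsz_idx must name the smallest page size set in pgsz_mask, and tsb_base is the real address of a TSB aligned to its own size. The helper name is hypothetical, not part of this patch.

#ifndef __ASSEMBLY__
/* Hypothetical sketch: an 8K-indexed, direct mapped TSB for context 0. */
static inline void hv_tsb_descr_init_sketch(struct hv_tsb_descr *d,
					    unsigned long tsb_base_ra,
					    unsigned int num_ttes)
{
	d->pgsz_idx  = HV_PGSZ_IDX_8K;	/* smallest size set in pgsz_mask below */
	d->assoc     = 1;		/* direct mapped                        */
	d->num_ttes  = num_ttes;	/* counted in TTEs, not bytes           */
	d->ctx_idx   = 0;		/* use context 0                        */
	d->pgsz_mask = HV_PGSZ_MASK_8K | HV_PGSZ_MASK_4MB;
	d->tsb_base  = tsb_base_ra;	/* real address of the TSB              */
	d->resv      = 0;
}
#endif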
446 | |||
447 | /* MMU fault status area. | ||
448 | * | ||
449 | * MMU related faults have their status and fault address information | ||
450 | * placed into a memory region made available by privileged code. Each | ||
451 | * virtual processor must make a mmu_fault_area_conf() call to tell the | ||
452 | * hypervisor where that processor's fault status should be stored. | ||
453 | * | ||
454 | * The fault status block is a multiple of 64-bytes and must be aligned | ||
455 | * on a 64-byte boundary. | ||
456 | */ | ||
457 | #ifndef __ASSEMBLY__ | ||
458 | struct hv_fault_status { | ||
459 | unsigned long i_fault_type; | ||
460 | unsigned long i_fault_addr; | ||
461 | unsigned long i_fault_ctx; | ||
462 | unsigned long i_reserved[5]; | ||
463 | unsigned long d_fault_type; | ||
464 | unsigned long d_fault_addr; | ||
465 | unsigned long d_fault_ctx; | ||
466 | unsigned long d_reserved[5]; | ||
467 | }; | ||
468 | #endif | ||
469 | #define HV_FAULT_I_TYPE_OFFSET 0x00 | ||
470 | #define HV_FAULT_I_ADDR_OFFSET 0x08 | ||
471 | #define HV_FAULT_I_CTX_OFFSET 0x10 | ||
472 | #define HV_FAULT_D_TYPE_OFFSET 0x40 | ||
473 | #define HV_FAULT_D_ADDR_OFFSET 0x48 | ||
474 | #define HV_FAULT_D_CTX_OFFSET 0x50 | ||
475 | |||
476 | #define HV_FAULT_TYPE_FAST_MISS 1 | ||
477 | #define HV_FAULT_TYPE_FAST_PROT 2 | ||
478 | #define HV_FAULT_TYPE_MMU_MISS 3 | ||
479 | #define HV_FAULT_TYPE_INV_RA 4 | ||
480 | #define HV_FAULT_TYPE_PRIV_VIOL 5 | ||
481 | #define HV_FAULT_TYPE_PROT_VIOL 6 | ||
482 | #define HV_FAULT_TYPE_NFO 7 | ||
483 | #define HV_FAULT_TYPE_NFO_SEFF 8 | ||
484 | #define HV_FAULT_TYPE_INV_VA 9 | ||
485 | #define HV_FAULT_TYPE_INV_ASI 10 | ||
486 | #define HV_FAULT_TYPE_NC_ATOMIC 11 | ||
487 | #define HV_FAULT_TYPE_PRIV_ACT 12 | ||
488 | #define HV_FAULT_TYPE_RESV1 13 | ||
489 | #define HV_FAULT_TYPE_UNALIGNED 14 | ||
490 | #define HV_FAULT_TYPE_INV_PGSZ 15 | ||
491 | /* Values 16 --> -2 are reserved. */ | ||
492 | #define HV_FAULT_TYPE_MULTIPLE -1 | ||
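The *_OFFSET values above are simply the byte offsets of the struct hv_fault_status members (e.g. offsetof(struct hv_fault_status, d_fault_addr) == HV_FAULT_D_ADDR_OFFSET == 0x48). The sketch below is a hypothetical outline of how a data-fault handler might consume the area registered via mmu_fault_area_conf(); it is not part of this patch.

#ifndef __ASSEMBLY__
/* Hypothetical sketch: reading the D-MMU half of the fault status area. */
static inline void handle_dmmu_fault_sketch(struct hv_fault_status *fsr)
{
	unsigned long addr = fsr->d_fault_addr;	/* faulting virtual address */
	unsigned long ctx  = fsr->d_fault_ctx;	/* faulting context         */

	switch (fsr->d_fault_type) {
	case HV_FAULT_TYPE_FAST_MISS:
	case HV_FAULT_TYPE_MMU_MISS:
		/* refill the TSB/TLB for (ctx, addr) */
		break;
	default:
		/* unexpected or fatal fault type */
		break;
	}
	(void) addr;
	(void) ctx;
}
#endif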
493 | |||
494 | /* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(), | ||
495 | * and mmu_{map,unmap}_perm_addr(). | ||
496 | */ | ||
497 | #define HV_MMU_DMMU 0x01 | ||
498 | #define HV_MMU_IMMU 0x02 | ||
499 | #define HV_MMU_ALL (HV_MMU_DMMU | HV_MMU_IMMU) | ||
500 | |||
501 | /* mmu_map_addr() | ||
502 | * TRAP: HV_MMU_MAP_ADDR_TRAP | ||
503 | * ARG0: virtual address | ||
504 | * ARG1: mmu context | ||
505 | * ARG2: TTE | ||
506 | * ARG3: flags (HV_MMU_{IMMU,DMMU}) | ||
507 | * ERRORS: EINVAL Invalid virtual address, mmu context, or flags | ||
508 | * EBADPGSZ Invalid page size value | ||
509 | * ENORADDR Invalid real address in TTE | ||
510 | * | ||
511 | * Create a non-permanent mapping using the given TTE, virtual | ||
512 | * address, and mmu context. The flags argument determines which | ||
513 | * (data, or instruction, or both) TLB the mapping gets loaded into. | ||
514 | * | ||
515 | * The behavior is undefined if the valid bit is clear in the TTE. | ||
516 | * | ||
517 | * Note: This API call is for privileged code to specify temporary translation | ||
518 | * mappings without the need to create and manage a TSB. | ||
519 | */ | ||
520 | |||
521 | /* mmu_unmap_addr() | ||
522 | * TRAP: HV_MMU_UNMAP_ADDR_TRAP | ||
523 | * ARG0: virtual address | ||
524 | * ARG1: mmu context | ||
525 | * ARG2: flags (HV_MMU_{IMMU,DMMU}) | ||
526 | * ERRORS: EINVAL Invalid virtual address, mmu context, or flags | ||
527 | * | ||
528 | * Demaps the given virtual address in the given mmu context on this | ||
529 | * CPU. This function is intended to be used to demap pages mapped | ||
530 | * with mmu_map_addr. This service is equivalent to invoking | ||
531 | * mmu_demap_page() with only the current CPU in the CPU list. The | ||
532 | * flags argument determines which (data, or instruction, or both) TLB | ||
533 | * the mapping gets unmapped from. | ||
534 | * | ||
535 | * Attempting to perform an unmap operation for a previously defined | ||
536 | * permanent mapping will have undefined results. | ||
537 | */ | ||
538 | |||
539 | /* mmu_tsb_ctx0() | ||
540 | * TRAP: HV_FAST_TRAP | ||
541 | * FUNCTION: HV_FAST_MMU_TSB_CTX0 | ||
542 | * ARG0: number of TSB descriptions | ||
543 | * ARG1: TSB descriptions pointer | ||
544 | * RET0: status | ||
545 | * ERRORS: ENORADDR Invalid TSB descriptions pointer or | ||
546 | * TSB base within a descriptor | ||
547 | * EBADALIGN TSB descriptions pointer is not aligned | ||
548 | * to an 8-byte boundary, or TSB base | ||
549 | * within a descriptor is not aligned for | ||
550 | * the given TSB size | ||
551 | * EBADPGSZ Invalid page size in a TSB descriptor | ||
552 | * EBADTSB Invalid associativity or size in a TSB | ||
553 | * descriptor | ||
554 | * EINVAL Invalid number of TSB descriptions, or | ||
555 | * invalid context index in a TSB | ||
556 | * descriptor, or index page size not | ||
557 | * equal to smallest page size in page | ||
558 | * size bitmask field. | ||
559 | * | ||
560 | * Configures the TSBs for the current CPU for virtual addresses with | ||
561 | * context zero. The TSB descriptions pointer is a pointer to an | ||
562 | * array of the given number of TSB descriptions. | ||
563 | * | ||
564 | * Note: The maximum number of TSBs available to a virtual CPU is given by the | ||
565 | * mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the | ||
566 | * machine description. | ||
567 | */ | ||
568 | #define HV_FAST_MMU_TSB_CTX0 0x20 | ||
569 | |||
570 | /* mmu_tsb_ctxnon0() | ||
571 | * TRAP: HV_FAST_TRAP | ||
572 | * FUNCTION: HV_FAST_MMU_TSB_CTXNON0 | ||
573 | * ARG0: number of TSB descriptions | ||
574 | * ARG1: TSB descriptions pointer | ||
575 | * RET0: status | ||
576 | * ERRORS: Same as for mmu_tsb_ctx0() above. | ||
577 | * | ||
578 | * Configures the TSBs for the current CPU for virtual addresses with | ||
579 | * non-zero contexts. The TSB descriptions pointer is a pointer to an | ||
580 | * array of the given number of TSB descriptions. | ||
581 | * | ||
582 | * Note: A maximum of 16 TSBs may be specified in the TSB description list. | ||
583 | */ | ||
584 | #define HV_FAST_MMU_TSB_CTXNON0 0x21 | ||
585 | |||
586 | /* mmu_demap_page() | ||
587 | * TRAP: HV_FAST_TRAP | ||
588 | * FUNCTION: HV_FAST_MMU_DEMAP_PAGE | ||
589 | * ARG0: reserved, must be zero | ||
590 | * ARG1: reserved, must be zero | ||
591 | * ARG2: virtual address | ||
592 | * ARG3: mmu context | ||
593 | * ARG4: flags (HV_MMU_{IMMU,DMMU}) | ||
594 | * RET0: status | ||
595 | * ERRORS: EINVAL Invalid virtual address, context, or | ||
596 | * flags value | ||
597 | * ENOTSUPPORTED ARG0 or ARG1 is non-zero | ||
598 | * | ||
599 | * Demaps any page mapping of the given virtual address in the given | ||
600 | * mmu context for the current virtual CPU. Any virtually tagged | ||
601 | * caches are guaranteed to be kept consistent. The flags argument | ||
602 | * determines which TLB (instruction, or data, or both) participate in | ||
603 | * the operation. | ||
604 | * | ||
605 | * ARG0 and ARG1 are both reserved and must be set to zero. | ||
606 | */ | ||
607 | #define HV_FAST_MMU_DEMAP_PAGE 0x22 | ||
608 | |||
609 | /* mmu_demap_ctx() | ||
610 | * TRAP: HV_FAST_TRAP | ||
611 | * FUNCTION: HV_FAST_MMU_DEMAP_CTX | ||
612 | * ARG0: reserved, must be zero | ||
613 | * ARG1: reserved, must be zero | ||
614 | * ARG2: mmu context | ||
615 | * ARG3: flags (HV_MMU_{IMMU,DMMU}) | ||
616 | * RET0: status | ||
617 | * ERRORS: EINVAL Invalid context or flags value | ||
618 | * ENOTSUPPORTED ARG0 or ARG1 is non-zero | ||
619 | * | ||
620 | * Demaps all non-permanent virtual page mappings previously specified | ||
621 | * for the given context for the current virtual CPU. Any virtual | ||
622 | * tagged caches are guaranteed to be kept consistent. The flags | ||
623 | * argument determines which TLB (instruction, or data, or both) | ||
624 | * participate in the operation. | ||
625 | * | ||
626 | * ARG0 and ARG1 are both reserved and must be set to zero. | ||
627 | */ | ||
628 | #define HV_FAST_MMU_DEMAP_CTX 0x23 | ||
629 | |||
630 | /* mmu_demap_all() | ||
631 | * TRAP: HV_FAST_TRAP | ||
632 | * FUNCTION: HV_FAST_MMU_DEMAP_ALL | ||
633 | * ARG0: reserved, must be zero | ||
634 | * ARG1: reserved, must be zero | ||
635 | * ARG2: flags (HV_MMU_{IMMU,DMMU}) | ||
636 | * RET0: status | ||
637 | * ERRORS: EINVAL Invalid flags value | ||
638 | * ENOTSUPPORTED ARG0 or ARG1 is non-zero | ||
639 | * | ||
640 | * Demaps all non-permanent virtual page mappings previously specified | ||
641 | * for the current virtual CPU. Any virtual tagged caches are | ||
642 | * guaranteed to be kept consistent. The flags argument determines | ||
643 | * which TLB (instruction, or data, or both) participate in the | ||
644 | * operation. | ||
645 | * | ||
646 | * ARG0 and ARG1 are both reserved and must be set to zero. | ||
647 | */ | ||
648 | #define HV_FAST_MMU_DEMAP_ALL 0x24 | ||
649 | |||
650 | /* mmu_map_perm_addr() | ||
651 | * TRAP: HV_FAST_TRAP | ||
652 | * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR | ||
653 | * ARG0: virtual address | ||
654 | * ARG1: reserved, must be zero | ||
655 | * ARG2: TTE | ||
656 | * ARG3: flags (HV_MMU_{IMMU,DMMU}) | ||
657 | * RET0: status | ||
658 | * ERRORS: EINVAL Invalid virtual address or flags value | ||
659 | * EBADPGSZ Invalid page size value | ||
660 | * ENORADDR Invalid real address in TTE | ||
661 | * ETOOMANY Too many mappings (max of 8 reached) | ||
662 | * | ||
663 | * Create a permanent mapping using the given TTE and virtual address | ||
664 | * for context 0 on the calling virtual CPU. A maximum of 8 such | ||
665 | * permanent mappings may be specified by privileged code. Mappings | ||
666 | * may be removed with mmu_unmap_perm_addr(). | ||
667 | * | ||
668 | * The behavior is undefined if a TTE with the valid bit clear is given. | ||
669 | * | ||
670 | * Note: This call is used to specify address space mappings for which | ||
671 | * privileged code does not expect to receive misses. For example, | ||
672 | * this mechanism can be used to map kernel nucleus code and data. | ||
673 | */ | ||
674 | #define HV_FAST_MMU_MAP_PERM_ADDR 0x25 | ||
675 | |||
676 | /* mmu_fault_area_conf() | ||
677 | * TRAP: HV_FAST_TRAP | ||
678 | * FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF | ||
679 | * ARG0: real address | ||
680 | * RET0: status | ||
681 | * RET1: previous mmu fault area real address | ||
682 | * ERRORS: ENORADDR Invalid real address | ||
683 | * EBADALIGN Invalid alignment for fault area | ||
684 | * | ||
685 | * Configure the MMU fault status area for the calling CPU. A 64-byte | ||
686 | * aligned real address specifies where MMU fault status information | ||
687 | * is placed. The return value is the previously specified area, or 0 | ||
688 | * for the first invocation. Specifying a fault area at real address | ||
689 | * 0 is not allowed. | ||
690 | */ | ||
691 | #define HV_FAST_MMU_FAULT_AREA_CONF 0x26 | ||
692 | |||
693 | /* mmu_enable() | ||
694 | * TRAP: HV_FAST_TRAP | ||
695 | * FUNCTION: HV_FAST_MMU_ENABLE | ||
696 | * ARG0: enable flag | ||
697 | * ARG1: return target address | ||
698 | * RET0: status | ||
699 | * ERRORS: ENORADDR Invalid real address when disabling | ||
700 | * translation. | ||
701 | * EBADALIGN The return target address is not | ||
702 | * aligned to an instruction. | ||
703 | * EINVAL The enable flag requests the current | ||
704 | * operating mode (e.g. disable if already | ||
705 | * disabled) | ||
706 | * | ||
707 | * Enable or disable virtual address translation for the calling CPU | ||
708 | * within the virtual machine domain. If the enable flag is zero, | ||
709 | * translation is disabled, any non-zero value will enable | ||
710 | * translation. | ||
711 | * | ||
712 | * When this function returns, the newly selected translation mode | ||
713 | * will be active. If the mmu is being enabled, then the return | ||
714 | * target address is a virtual address else it is a real address. | ||
715 | * | ||
716 | * Upon successful completion, control will be returned to the given | ||
717 | * return target address (ie. the cpu will jump to that address). On | ||
718 | * failure, the previous mmu mode remains and the trap simply returns | ||
719 | * as normal with the appropriate error code in RET0. | ||
720 | */ | ||
721 | #define HV_FAST_MMU_ENABLE 0x27 | ||
722 | |||
723 | /* mmu_unmap_perm_addr() | ||
724 | * TRAP: HV_FAST_TRAP | ||
725 | * FUNCTION: HV_FAST_MMU_UNMAP_PERM_ADDR | ||
726 | * ARG0: virtual address | ||
727 | * ARG1: reserved, must be zero | ||
728 | * ARG2: flags (HV_MMU_{IMMU,DMMU}) | ||
729 | * RET0: status | ||
730 | * ERRORS: EINVAL Invalid virtual address or flags value | ||
731 | * ENOMAP Specified mapping was not found | ||
732 | * | ||
733 | * Demaps any permanent page mapping (established via | ||
734 | * mmu_map_perm_addr()) at the given virtual address for context 0 on | ||
735 | * the current virtual CPU. Any virtual tagged caches are guaranteed | ||
736 | * to be kept consistent. | ||
737 | */ | ||
738 | #define HV_FAST_MMU_UNMAP_PERM_ADDR 0x28 | ||
739 | |||
740 | /* mmu_tsb_ctx0_info() | ||
741 | * TRAP: HV_FAST_TRAP | ||
742 | * FUNCTION: HV_FAST_MMU_TSB_CTX0_INFO | ||
743 | * ARG0: max TSBs | ||
744 | * ARG1: buffer pointer | ||
745 | * RET0: status | ||
746 | * RET1: number of TSBs | ||
747 | * ERRORS: EINVAL Supplied buffer is too small | ||
748 | * EBADALIGN The buffer pointer is badly aligned | ||
749 | * ENORADDR Invalid real address for buffer pointer | ||
750 | * | ||
751 | * Return the TSB configuration as previously defined by mmu_tsb_ctx0() | ||
752 | * into the provided buffer. The size of the buffer is given in ARG0 | ||
753 | * in terms of the number of TSB description entries. | ||
754 | * | ||
755 | * Upon return, RET1 always contains the number of TSB descriptions | ||
756 | * previously configured. If zero TSBs were configured, EOK is | ||
757 | * returned with RET1 containing 0. | ||
758 | */ | ||
759 | #define HV_FAST_MMU_TSB_CTX0_INFO 0x29 | ||
760 | |||
761 | /* mmu_tsb_ctxnon0_info() | ||
762 | * TRAP: HV_FAST_TRAP | ||
763 | * FUNCTION: HV_FAST_MMU_TSB_CTXNON0_INFO | ||
764 | * ARG0: max TSBs | ||
765 | * ARG1: buffer pointer | ||
766 | * RET0: status | ||
767 | * RET1: number of TSBs | ||
768 | * ERRORS: EINVAL Supplied buffer is too small | ||
769 | * EBADALIGN The buffer pointer is badly aligned | ||
770 | * ENORADDR Invalid real address for buffer pointer | ||
771 | * | ||
772 | * Return the TSB configuration as previously defined by | ||
773 | * mmu_tsb_ctxnon0() into the provided buffer. The size of the buffer | ||
774 | * is given in ARG0 in terms of the number of TSB description entries. | ||
775 | * | ||
776 | * Upon return, RET1 always contains the number of TSB descriptions | ||
777 | * previously configured. If zero TSBs were configured, EOK is | ||
778 | * returned with RET1 containing 0. | ||
779 | */ | ||
780 | #define HV_FAST_MMU_TSB_CTXNON0_INFO 0x2a | ||
781 | |||
782 | /* mmu_fault_area_info() | ||
783 | * TRAP: HV_FAST_TRAP | ||
784 | * FUNCTION: HV_FAST_MMU_FAULT_AREA_INFO | ||
785 | * RET0: status | ||
786 | * RET1: fault area real address | ||
787 | * ERRORS: No errors defined. | ||
788 | * | ||
789 | * Return the currently defined MMU fault status area for the current | ||
790 | * CPU. The real address of the fault status area is returned in | ||
791 | * RET1, or 0 is returned in RET1 if no fault status area is defined. | ||
792 | * | ||
793 | * Note: mmu_fault_area_conf() may be called with the return value (RET1) | ||
794 | * from this service if there is a need to save and restore the fault | ||
795 | * area for a cpu. | ||
796 | */ | ||
797 | #define HV_FAST_MMU_FAULT_AREA_INFO 0x2b | ||
798 | |||
799 | /* Cache and Memory services. */ | ||
800 | |||
801 | /* mem_scrub() | ||
802 | * TRAP: HV_FAST_TRAP | ||
803 | * FUNCTION: HV_FAST_MEM_SCRUB | ||
804 | * ARG0: real address | ||
805 | * ARG1: length | ||
806 | * RET0: status | ||
807 | * RET1: length scrubbed | ||
808 | * ERRORS: ENORADDR Invalid real address | ||
809 | * EBADALIGN Start address or length are not correctly | ||
810 | * aligned | ||
811 | * EINVAL Length is zero | ||
812 | * | ||
813 | * Zero the memory contents in the range real address to real address | ||
814 | * plus length minus 1. Also, valid ECC will be generated for that | ||
815 | * memory address range. Scrubbing is started at the given real | ||
816 | * address, but may not scrub the entire given length. The actual | ||
817 | * length scrubbed will be returned in RET1. | ||
818 | * | ||
819 | * The real address and length must be aligned on an 8K boundary, or | ||
820 | * contain the start address and length from a sun4v error report. | ||
821 | * | ||
822 | * Note: There are two uses for this function. The first use is to block clear | ||
823 | * and initialize memory and the second is to scrub an uncorrectable | ||
824 | * error reported via a resumable or non-resumable trap. The second | ||
825 | * use requires the arguments to be equal to the real address and length | ||
826 | * provided in a sun4v memory error report. | ||
827 | */ | ||
828 | #define HV_FAST_MEM_SCRUB 0x31 | ||
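Because mem_scrub() may scrub less than the requested length, callers typically advance by the RET1 value and retry. A sketch follows, assuming a hypothetical sun4v_mem_scrub() wrapper that returns the status and writes the scrubbed length through a pointer; neither is defined in this patch.

#ifndef __ASSEMBLY__
/* Hypothetical sketch: wrapper assumed, not defined by this patch. */
extern unsigned long sun4v_mem_scrub(unsigned long ra, unsigned long len,
				     unsigned long *scrubbed);

static inline unsigned long scrub_range_sketch(unsigned long ra,
					       unsigned long len)
{
	while (len) {
		unsigned long done, status;

		status = sun4v_mem_scrub(ra, len, &done);
		if (status != HV_EOK)
			return status;	/* ENORADDR, EBADALIGN, EINVAL, ...  */
		ra  += done;		/* RET1: length actually scrubbed    */
		len -= done;
	}
	return HV_EOK;
}
#endif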
829 | |||
830 | /* mem_sync() | ||
831 | * TRAP: HV_FAST_TRAP | ||
832 | * FUNCTION: HV_FAST_MEM_SYNC | ||
833 | * ARG0: real address | ||
834 | * ARG1: length | ||
835 | * RET0: status | ||
836 | * RET1: length synced | ||
837 | * ERRORS: ENORADDR Invalid real address | ||
838 | * EBADALIGN Start address or length are not correctly | ||
839 | * aligned | ||
840 | * EINVAL Length is zero | ||
841 | * | ||
842 | * Force the next access within the real address to real address plus | ||
843 | * length minus 1 to be fetched from main system memory. Less than | ||
844 | * the given length may be synced, the actual amount synced is | ||
845 | * returned in RET1. The real address and length must be aligned on | ||
846 | * an 8K boundary. | ||
847 | */ | ||
848 | #define HV_FAST_MEM_SYNC 0x32 | ||
849 | |||
850 | /* Time of day services. | ||
851 | * | ||
852 | * The hypervisor maintains the time of day on a per-domain basis. | ||
853 | * Changing the time of day in one domain does not affect the time of | ||
854 | * day on any other domain. | ||
855 | * | ||
856 | * Time is described by a single unsigned 64-bit word which is the | ||
857 | * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1, | ||
858 | * 1970). | ||
859 | */ | ||
860 | |||
861 | /* tod_get() | ||
862 | * TRAP: HV_FAST_TRAP | ||
863 | * FUNCTION: HV_FAST_TOD_GET | ||
864 | * RET0: status | ||
865 | * RET1: TOD | ||
866 | * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable | ||
867 | * ENOTSUPPORTED If TOD not supported on this platform | ||
868 | * | ||
869 | * Return the current time of day. May block if TOD access is | ||
870 | * temporarily not possible. | ||
871 | */ | ||
872 | #define HV_FAST_TOD_GET 0x50 | ||
873 | |||
874 | /* tod_set() | ||
875 | * TRAP: HV_FAST_TRAP | ||
876 | * FUNCTION: HV_FAST_TOD_SET | ||
877 | * ARG0: TOD | ||
878 | * RET0: status | ||
879 | * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable | ||
880 | * ENOTSUPPORTED If TOD not supported on this platform | ||
881 | * | ||
882 | * The current time of day is set to the value specified in ARG0. May | ||
883 | * block if TOD access is temporarily not possible. | ||
884 | */ | ||
885 | #define HV_FAST_TOD_SET 0x51 | ||
886 | |||
887 | /* Console services */ | ||
888 | |||
889 | /* con_getchar() | ||
890 | * TRAP: HV_FAST_TRAP | ||
891 | * FUNCTION: HV_FAST_CONS_GETCHAR | ||
892 | * RET0: status | ||
893 | * RET1: character | ||
894 | * ERRORS: EWOULDBLOCK No character available. | ||
895 | * | ||
896 | * Returns a character from the console device. If no character is | ||
897 | * available then an EWOULDBLOCK error is returned. If a character is | ||
898 | * available, then the returned status is EOK and the character value | ||
899 | * is in RET1. | ||
900 | * | ||
901 | * A virtual BREAK is represented by the 64-bit value -1. | ||
902 | * | ||
903 | * A virtual HUP signal is represented by the 64-bit value -2. | ||
904 | */ | ||
905 | #define HV_FAST_CONS_GETCHAR 0x60 | ||
906 | |||
907 | /* con_putchar() | ||
908 | * TRAP: HV_FAST_TRAP | ||
909 | * FUNCTION: HV_FAST_CONS_PUTCHAR | ||
910 | * ARG0: character | ||
911 | * RET0: status | ||
912 | * ERRORS: EINVAL Illegal character | ||
913 | * EWOULDBLOCK Output buffer currently full, would block | ||
914 | * | ||
915 | * Send a character to the console device. Only character values | ||
916 | * between 0 and 255 may be used. Values outside this range are | ||
917 | * invalid except for the 64-bit value -1 which is used to send a | ||
918 | * virtual BREAK. | ||
919 | */ | ||
920 | #define HV_FAST_CONS_PUTCHAR 0x61 | ||
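A sketch of polling the virtual console with a hypothetical sun4v_con_getchar() wrapper (not part of this patch), handling EWOULDBLOCK and the in-band BREAK (-1) and HUP (-2) values described above.

#ifndef __ASSEMBLY__
/* Hypothetical sketch: wrapper assumed, not defined by this patch. */
extern unsigned long sun4v_con_getchar(long *ch);

static inline int console_poll_sketch(char *out)
{
	long ch;
	unsigned long status = sun4v_con_getchar(&ch);

	if (status == HV_EWOULDBLOCK)
		return 0;			/* no character available yet */
	if (status != HV_EOK || ch == -1 || ch == -2)
		return 0;			/* error, BREAK or HUP        */
	*out = (char) ch;			/* ordinary character 0-255   */
	return 1;
}
#endif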
921 | |||
922 | /* Trap trace services. | ||
923 | * | ||
924 | * The hypervisor provides a trap tracing capability for privileged | ||
925 | * code running on each virtual CPU. Privileged code provides a | ||
926 | * round-robin trap trace queue within which the hypervisor writes | ||
927 | * 64-byte entries detailing hyperprivileged traps taken on behalf of | ||
928 | * privileged code. This is provided as a debugging capability for | ||
929 | * privileged code. | ||
930 | * | ||
931 | * The trap trace control structure is 64-bytes long and placed at the | ||
932 | * start (offset 0) of the trap trace buffer, and is described as | ||
933 | * follows: | ||
934 | */ | ||
935 | #ifndef __ASSEMBLY__ | ||
936 | struct hv_trap_trace_control { | ||
937 | unsigned long head_offset; | ||
938 | unsigned long tail_offset; | ||
939 | unsigned long __reserved[0x30 / sizeof(unsigned long)]; | ||
940 | }; | ||
941 | #endif | ||
942 | #define HV_TRAP_TRACE_CTRL_HEAD_OFFSET 0x00 | ||
943 | #define HV_TRAP_TRACE_CTRL_TAIL_OFFSET 0x08 | ||
944 | |||
945 | /* The head offset is the offset of the most recently completed entry | ||
946 | * in the trap-trace buffer. The tail offset is the offset of the | ||
947 | * next entry to be written. The control structure is owned and | ||
948 | * modified by the hypervisor. A guest may not modify the control | ||
949 | * structure contents. Attempts to do so will result in undefined | ||
950 | * behavior for the guest. | ||
951 | * | ||
952 | * Each trap trace buffer entry is laid out as follows: | ||
953 | */ | ||
954 | #ifndef __ASSEMBLY__ | ||
955 | struct hv_trap_trace_entry { | ||
956 | unsigned char type; /* Hypervisor or guest entry? */ | ||
957 | unsigned char hpstate; /* Hyper-privileged state */ | ||
958 | unsigned char tl; /* Trap level */ | ||
959 | unsigned char gl; /* Global register level */ | ||
960 | unsigned short tt; /* Trap type */ | ||
961 | unsigned short tag; /* Extended trap identifier */ | ||
962 | unsigned long tstate; /* Trap state */ | ||
963 | unsigned long tick; /* Tick */ | ||
964 | unsigned long tpc; /* Trap PC */ | ||
965 | unsigned long f1; /* Entry specific */ | ||
966 | unsigned long f2; /* Entry specific */ | ||
967 | unsigned long f3; /* Entry specific */ | ||
968 | unsigned long f4; /* Entry specific */ | ||
969 | }; | ||
970 | #endif | ||
971 | #define HV_TRAP_TRACE_ENTRY_TYPE 0x00 | ||
972 | #define HV_TRAP_TRACE_ENTRY_HPSTATE 0x01 | ||
973 | #define HV_TRAP_TRACE_ENTRY_TL 0x02 | ||
974 | #define HV_TRAP_TRACE_ENTRY_GL 0x03 | ||
975 | #define HV_TRAP_TRACE_ENTRY_TT 0x04 | ||
976 | #define HV_TRAP_TRACE_ENTRY_TAG 0x06 | ||
977 | #define HV_TRAP_TRACE_ENTRY_TSTATE 0x08 | ||
978 | #define HV_TRAP_TRACE_ENTRY_TICK 0x10 | ||
979 | #define HV_TRAP_TRACE_ENTRY_TPC 0x18 | ||
980 | #define HV_TRAP_TRACE_ENTRY_F1 0x20 | ||
981 | #define HV_TRAP_TRACE_ENTRY_F2 0x28 | ||
982 | #define HV_TRAP_TRACE_ENTRY_F3 0x30 | ||
983 | #define HV_TRAP_TRACE_ENTRY_F4 0x38 | ||
984 | |||
985 | /* The type field is encoded as follows. */ | ||
986 | #define HV_TRAP_TYPE_UNDEF 0x00 /* Entry content undefined */ | ||
987 | #define HV_TRAP_TYPE_HV 0x01 /* Hypervisor trap entry */ | ||
988 | #define HV_TRAP_TYPE_GUEST 0xff /* Added via ttrace_addentry() */ | ||
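A sketch of locating the most recently completed entry, assuming head_offset and tail_offset behave as described above and that equal offsets mean nothing has been logged yet; the helper is hypothetical, and the buffer pointer is the guest's mapping of the trace buffer configured via ttrace_buf_conf().

#ifndef __ASSEMBLY__
/* Hypothetical sketch: the control block sits at offset 0, entries follow it. */
static inline struct hv_trap_trace_entry *
ttrace_last_entry_sketch(void *buf)
{
	struct hv_trap_trace_control *ctl = buf;

	if (ctl->head_offset == ctl->tail_offset)
		return (struct hv_trap_trace_entry *) 0;	/* assumed: empty */
	return (struct hv_trap_trace_entry *)
		((char *) buf + ctl->head_offset);		/* newest entry   */
}
#endif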
989 | |||
990 | /* ttrace_buf_conf() | ||
991 | * TRAP: HV_FAST_TRAP | ||
992 | * FUNCTION: HV_FAST_TTRACE_BUF_CONF | ||
993 | * ARG0: real address | ||
994 | * ARG1: number of entries | ||
995 | * RET0: status | ||
996 | * RET1: number of entries | ||
997 | * ERRORS: ENORADDR Invalid real address | ||
998 | * EINVAL Size is too small | ||
999 | * EBADALIGN Real address not aligned on 64-byte boundary | ||
1000 | * | ||
1001 | * Requests hypervisor trap tracing and declares a virtual CPU's trap | ||
1002 | * trace buffer to the hypervisor. The real address supplies the real | ||
1003 | * base address of the trap trace queue and must be 64-byte aligned. | ||
1004 | * Specifying a value of 0 for the number of entries disables trap | ||
1005 | * tracing for the calling virtual CPU. The buffer allocated must be | ||
1006 | * sized for a power of two number of 64-byte trap trace entries plus | ||
1007 | * an initial 64-byte control structure. | ||
1008 | * | ||
1009 | * This may be invoked any number of times so that a virtual CPU may | ||
1010 | * relocate a trap trace buffer or create "snapshots" of information. | ||
1011 | * | ||
1012 | * If the real address is illegal or badly aligned, then trap tracing | ||
1013 | * is disabled and an error is returned. | ||
1014 | * | ||
1015 | * Upon failure with EINVAL, this service call returns in RET1 the | ||
1016 | * minimum number of buffer entries required. Upon other failures | ||
1017 | * RET1 is undefined. | ||
1018 | */ | ||
1019 | #define HV_FAST_TTRACE_BUF_CONF 0x90 | ||
1020 | |||
1021 | /* ttrace_buf_info() | ||
1022 | * TRAP: HV_FAST_TRAP | ||
1023 | * FUNCTION: HV_FAST_TTRACE_BUF_INFO | ||
1024 | * RET0: status | ||
1025 | * RET1: real address | ||
1026 | * RET2: size | ||
1027 | * ERRORS: None defined. | ||
1028 | * | ||
1029 | * Returns the size and location of the previously declared trap-trace | ||
1030 | * buffer. In the event that no buffer was previously defined, or the | ||
1031 | * buffer is disabled, this call will return a size of zero bytes. | ||
1032 | */ | ||
1033 | #define HV_FAST_TTRACE_BUF_INFO 0x91 | ||
1034 | |||
1035 | /* ttrace_enable() | ||
1036 | * TRAP: HV_FAST_TRAP | ||
1037 | * FUNCTION: HV_FAST_TTRACE_ENABLE | ||
1038 | * ARG0: enable | ||
1039 | * RET0: status | ||
1040 | * RET1: previous enable state | ||
1041 | * ERRORS: EINVAL No trap trace buffer currently defined | ||
1042 | * | ||
1043 | * Enable or disable trap tracing, and return the previous enabled | ||
1044 | * state in RET1. Future systems may define various flags for the | ||
1045 | * enable argument (ARG0), for the moment a guest should pass | ||
1046 | * "(uint64_t) -1" to enable, and "(uint64_t) 0" to disable all | ||
1047 | * tracing - which will ensure future compatibility. | ||
1048 | */ | ||
1049 | #define HV_FAST_TTRACE_ENABLE 0x92 | ||
1050 | |||
1051 | /* ttrace_freeze() | ||
1052 | * TRAP: HV_FAST_TRAP | ||
1053 | * FUNCTION: HV_FAST_TTRACE_FREEZE | ||
1054 | * ARG0: freeze | ||
1055 | * RET0: status | ||
1056 | * RET1: previous freeze state | ||
1057 | * ERRORS: EINVAL No trap trace buffer currently defined | ||
1058 | * | ||
1059 | * Freeze or unfreeze trap tracing, returning the previous freeze | ||
1060 | * state in RET1. A guest should pass a non-zero value to freeze and | ||
1061 | * a zero value to unfreeze all tracing. The returned previous state | ||
1062 | * is 0 for not frozen and 1 for frozen. | ||
1063 | */ | ||
1064 | #define HV_FAST_TTRACE_FREEZE 0x93 | ||
1065 | |||
1066 | /* ttrace_addentry() | ||
1067 | * TRAP: HV_TTRACE_ADDENTRY_TRAP | ||
1068 | * ARG0: tag (16-bits) | ||
1069 | * ARG1: data word 0 | ||
1070 | * ARG2: data word 1 | ||
1071 | * ARG3: data word 2 | ||
1072 | * ARG4: data word 3 | ||
1073 | * RET0: status | ||
1074 | * ERRORS: EINVAL No trap trace buffer currently defined | ||
1075 | * | ||
1076 | * Add an entry to the trap trace buffer. Upon return only ARG0/RET0 | ||
1077 | * is modified - none of the other registers holding arguments are | ||
1078 | * volatile across this hypervisor service. | ||
1079 | */ | ||
1080 | |||
1081 | /* Core dump services. | ||
1082 | * | ||
1083 | * Since the hypervisor virtualizes and thus obscures a lot of the | ||
1084 | * physical machine layout and state, traditional OS crash dumps can | ||
1085 | * be difficult to diagnose especially when the problem is a | ||
1086 | * configuration error of some sort. | ||
1087 | * | ||
1088 | * The dump services provide an opaque buffer into which the | ||
1089 | * hypervisor can place its internal state in order to assist in | ||
1090 | * debugging such situations. The contents are opaque and extremely | ||
1091 | * platform and hypervisor implementation specific. The guest, during | ||
1092 | * a core dump, requests that the hypervisor update any information in | ||
1093 | * the dump buffer in preparation to being dumped as part of the | ||
1094 | * domain's memory image. | ||
1095 | */ | ||
1096 | |||
1097 | /* dump_buf_update() | ||
1098 | * TRAP: HV_FAST_TRAP | ||
1099 | * FUNCTION: HV_FAST_DUMP_BUF_UPDATE | ||
1100 | * ARG0: real address | ||
1101 | * ARG1: size | ||
1102 | * RET0: status | ||
1103 | * RET1: required size of dump buffer | ||
1104 | * ERRORS: ENORADDR Invalid real address | ||
1105 | * EBADALIGN Real address is not aligned on a 64-byte | ||
1106 | * boundary | ||
1107 | * EINVAL Size is non-zero but less than minimum size | ||
1108 | * required | ||
1109 | * ENOTSUPPORTED Operation not supported on current logical | ||
1110 | * domain | ||
1111 | * | ||
1112 | * Declare a domain dump buffer to the hypervisor. The real address | ||
1113 | * provided for the domain dump buffer must be 64-byte aligned. The | ||
1114 | * size specifies the size of the dump buffer and may be larger than | ||
1115 | * the minimum size specified in the machine description. The | ||
1116 | * hypervisor will fill the dump buffer with opaque data. | ||
1117 | * | ||
1118 | * Note: A guest may elect to include dump buffer contents as part of a crash | ||
1119 | * dump to assist with debugging. This function may be called any number | ||
1120 | * of times so that a guest may relocate a dump buffer, or create | ||
1121 | * "snapshots" of any dump-buffer information. Each call to | ||
1122 | * dump_buf_update() atomically declares the new dump buffer to the | ||
1123 | * hypervisor. | ||
1124 | * | ||
1125 | * A specified size of 0 unconfigures the dump buffer. If the real | ||
1126 | * address is illegal or badly aligned, then any currently active dump | ||
1127 | * buffer is disabled and an error is returned. | ||
1128 | * | ||
1129 | * In the event that the call fails with EINVAL, RET1 contains the | ||
1130 | * minimum size required by the hypervisor for a valid dump buffer. | ||
1131 | */ | ||
1132 | #define HV_FAST_DUMP_BUF_UPDATE 0x94 | ||
1133 | |||
1134 | /* dump_buf_info() | ||
1135 | * TRAP: HV_FAST_TRAP | ||
1136 | * FUNCTION: HV_FAST_DUMP_BUF_INFO | ||
1137 | * RET0: status | ||
1138 | * RET1: real address of current dump buffer | ||
1139 | * RET2: size of current dump buffer | ||
1140 | * ERRORS: No errors defined. | ||
1141 | * | ||
1142 | * Return the currently configured dump buffer description. A | ||
1143 | * returned size of 0 bytes indicates an undefined dump buffer. In | ||
1144 | * this case the return address in RET1 is undefined. | ||
1145 | */ | ||
1146 | #define HV_FAST_DUMP_BUF_INFO 0x95 | ||
1147 | |||
1148 | /* Device interrupt services. | ||
1149 | * | ||
1150 | * Device interrupts are allocated to system bus bridges by the hypervisor, | ||
1151 | * and described to OBP in the machine description. OBP then describes | ||
1152 | * these interrupts to the OS via properties in the device tree. | ||
1153 | * | ||
1154 | * Terminology: | ||
1155 | * | ||
1156 | * cpuid Unique opaque value which represents a target cpu. | ||
1157 | * | ||
1158 | * devhandle Device handle. It uniquely identifies a device, and | ||
1159 | * consists of the lower 28-bits of the hi-cell of the | ||
1160 | * first entry of the device's "reg" property in the | ||
1161 | * OBP device tree. | ||
1162 | * | ||
1163 | * devino Device interrupt number. Specifies the relative | ||
1164 | * interrupt number within the device. The unique | ||
1165 | * combination of devhandle and devino are used to | ||
1166 | * identify a specific device interrupt. | ||
1167 | * | ||
1168 | * Note: The devino value is the same as the values in the | ||
1169 | * "interrupts" property or "interrupt-map" property | ||
1170 | * in the OBP device tree for that device. | ||
1171 | * | ||
1172 | * sysino System interrupt number. A 64-bit unsigned integer | ||
1173 | * representing a unique interrupt within a virtual | ||
1174 | * machine. | ||
1175 | * | ||
1176 | * intr_state A flag representing the interrupt state for a given | ||
1177 | * sysino. The state values are defined below. | ||
1178 | * | ||
1179 | * intr_enabled A flag representing the 'enabled' state for a given | ||
1180 | * sysino. The enable values are defined below. | ||
1181 | */ | ||
1182 | |||
1183 | #define HV_INTR_STATE_IDLE 0 /* Nothing pending */ | ||
1184 | #define HV_INTR_STATE_RECEIVED 1 /* Interrupt received by hardware */ | ||
1185 | #define HV_INTR_STATE_DELIVERED 2 /* Interrupt delivered to queue */ | ||
1186 | |||
1187 | #define HV_INTR_DISABLED 0 /* sysino not enabled */ | ||
1188 | #define HV_INTR_ENABLED 1 /* sysino enabled */ | ||
1189 | |||
1190 | /* intr_devino_to_sysino() | ||
1191 | * TRAP: HV_FAST_TRAP | ||
1192 | * FUNCTION: HV_FAST_INTR_DEVINO2SYSINO | ||
1193 | * ARG0: devhandle | ||
1194 | * ARG1: devino | ||
1195 | * RET0: status | ||
1196 | * RET1: sysino | ||
1197 | * ERRORS: EINVAL Invalid devhandle/devino | ||
1198 | * | ||
1199 | * Converts a device specific interrupt number of the given | ||
1200 | * devhandle/devino into a system specific ino (sysino). | ||
1201 | */ | ||
1202 | #define HV_FAST_INTR_DEVINO2SYSINO 0xa0 | ||
1203 | |||
1204 | /* intr_getenabled() | ||
1205 | * TRAP: HV_FAST_TRAP | ||
1206 | * FUNCTION: HV_FAST_INTR_GETENABLED | ||
1207 | * ARG0: sysino | ||
1208 | * RET0: status | ||
1209 | * RET1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) | ||
1210 | * ERRORS: EINVAL Invalid sysino | ||
1211 | * | ||
1212 | * Returns interrupt enabled state in RET1 for the interrupt defined | ||
1213 | * by the given sysino. | ||
1214 | */ | ||
1215 | #define HV_FAST_INTR_GETENABLED 0xa1 | ||
1216 | |||
1217 | /* intr_setenabled() | ||
1218 | * TRAP: HV_FAST_TRAP | ||
1219 | * FUNCTION: HV_FAST_INTR_SETENABLED | ||
1220 | * ARG0: sysino | ||
1221 | * ARG1: intr_enabled (HV_INTR_{DISABLED,ENABLED}) | ||
1222 | * RET0: status | ||
1223 | * ERRORS: EINVAL Invalid sysino or intr_enabled value | ||
1224 | * | ||
1225 | * Set the 'enabled' state of the interrupt sysino. | ||
1226 | */ | ||
1227 | #define HV_FAST_INTR_SETENABLED 0xa2 | ||
1228 | |||
1229 | /* intr_getstate() | ||
1230 | * TRAP: HV_FAST_TRAP | ||
1231 | * FUNCTION: HV_FAST_INTR_GETSTATE | ||
1232 | * ARG0: sysino | ||
1233 | * RET0: status | ||
1234 | * RET1: intr_state (HV_INTR_STATE_*) | ||
1235 | * ERRORS: EINVAL Invalid sysino | ||
1236 | * | ||
1237 | * Returns current state of the interrupt defined by the given sysino. | ||
1238 | */ | ||
1239 | #define HV_FAST_INTR_GETSTATE 0xa3 | ||
1240 | |||
1241 | /* intr_setstate() | ||
1242 | * TRAP: HV_FAST_TRAP | ||
1243 | * FUNCTION: HV_FAST_INTR_SETSTATE | ||
1244 | * ARG0: sysino | ||
1245 | * ARG1: intr_state (HV_INTR_STATE_*) | ||
1246 | * RET0: status | ||
1247 | * ERRORS: EINVAL Invalid sysino or intr_state value | ||
1248 | * | ||
1249 | * Sets the current state of the interrupt described by the given sysino | ||
1250 | * value. | ||
1251 | * | ||
1252 | * Note: Setting the state to HV_INTR_STATE_IDLE clears any pending | ||
1253 | * interrupt for sysino. | ||
1254 | */ | ||
1255 | #define HV_FAST_INTR_SETSTATE 0xa4 | ||
1256 | |||
1257 | /* intr_gettarget() | ||
1258 | * TRAP: HV_FAST_TRAP | ||
1259 | * FUNCTION: HV_FAST_INTR_GETTARGET | ||
1260 | * ARG0: sysino | ||
1261 | * RET0: status | ||
1262 | * RET1: cpuid | ||
1263 | * ERRORS: EINVAL Invalid sysino | ||
1264 | * | ||
1265 | * Returns CPU that is the current target of the interrupt defined by | ||
1266 | * the given sysino. The CPU value returned is undefined if the target | ||
1267 | * has not been set via intr_settarget(). | ||
1268 | */ | ||
1269 | #define HV_FAST_INTR_GETTARGET 0xa5 | ||
1270 | |||
1271 | /* intr_settarget() | ||
1272 | * TRAP: HV_FAST_TRAP | ||
1273 | * FUNCTION: HV_FAST_INTR_SETTARGET | ||
1274 | * ARG0: sysino | ||
1275 | * ARG1: cpuid | ||
1276 | * RET0: status | ||
1277 | * ERRORS: EINVAL Invalid sysino | ||
1278 | * ENOCPU Invalid cpuid | ||
1279 | * | ||
1280 | * Set the target CPU for the interrupt defined by the given sysino. | ||
1281 | */ | ||
1282 | #define HV_FAST_INTR_SETTARGET 0xa6 | ||
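/* Editor's illustration, not part of the original header: retargeting a
 * device interrupt using the services above.  The example_intr_*() C
 * wrappers used here are hypothetical; each is assumed to issue the
 * matching fast trap and return the %o0 status, storing any result
 * through its pointer argument.
 */
#ifndef __ASSEMBLY__
static inline long example_retarget_interrupt(unsigned long devhandle,
					      unsigned long devino,
					      unsigned long target_cpuid)
{
	unsigned long sysino;

	/* Translate the bus-specific devino into a system-wide sysino. */
	if (example_intr_devino_to_sysino(devhandle, devino, &sysino))
		return -1;

	/* Quiesce the interrupt, move it, drop any pending state
	 * (HV_INTR_STATE_IDLE clears it), then re-enable.
	 */
	if (example_intr_setenabled(sysino, HV_INTR_DISABLED))		/* hypothetical */
		return -1;
	if (example_intr_settarget(sysino, target_cpuid))		/* hypothetical */
		return -1;
	if (example_intr_setstate(sysino, HV_INTR_STATE_IDLE))		/* hypothetical */
		return -1;
	return example_intr_setenabled(sysino, HV_INTR_ENABLED) ? -1 : 0;
}
#endif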
1283 | |||
1284 | /* PCI IO services. | ||
1285 | * | ||
1286 | * See the terminology descriptions in the device interrupt services | ||
1287 | * section above as those apply here too. Here are terminology | ||
1288 | * definitions specific to these PCI IO services: | ||
1289 | * | ||
1290 | * tsbnum TSB number. Identifies which io-tsb is used. | ||
1291 | * For this version of the specification, tsbnum | ||
1292 | * must be zero. | ||
1293 | * | ||
1294 | * tsbindex TSB index. Identifies which entry in the TSB | ||
1295 | * is used. The first entry is zero. | ||
1296 | * | ||
1297 | * tsbid A 64-bit aligned data structure which contains | ||
1298 | * a tsbnum and a tsbindex. Bits 63:32 contain the | ||
1299 | * tsbnum and bits 31:00 contain the tsbindex. | ||
1300 | * | ||
1301 | * io_attributes IO attributes for IOMMU mappings. One or more | ||
1302 | * of the attribute bits are stored in a 64-bit | ||
1303 | * value. The values are defined below. | ||
1304 | * | ||
1305 | * r_addr 64-bit real address | ||
1306 | * | ||
1307 | * pci_device PCI device address. A PCI device address identifies | ||
1308 | * a specific device on a specific PCI bus segment. | ||
1309 | * A PCI device address is a 32-bit unsigned integer | ||
1310 | * with the following format: | ||
1311 | * | ||
1312 | * 00000000.bbbbbbbb.dddddfff.00000000 | ||
1313 | * | ||
1314 | * Use the HV_PCI_DEVICE_BUILD() macro to construct | ||
1315 | * such values. | ||
1316 | * | ||
1317 | * pci_config_offset | ||
1318 | * PCI configuration space offset. For conventional | ||
1319 | * PCI a value between 0 and 255. For extended | ||
1320 | * configuration space, a value between 0 and 4095. | ||
1321 | * | ||
1322 | * Note: For PCI configuration space accesses, the offset | ||
1323 | * must be aligned to the access size. | ||
1324 | * | ||
1325 | * error_flag A return value which specifies if the action succeeded | ||
1326 | * or failed. 0 means no error, non-0 means some error | ||
1327 | * occurred while performing the service. | ||
1328 | * | ||
1329 | * io_sync_direction | ||
1330 | * Direction definition for pci_dma_sync(), defined | ||
1331 | * below in HV_PCI_SYNC_*. | ||
1332 | * | ||
1333 | * io_page_list A list of io_page_addresses, an io_page_address is | ||
1334 | * a real address. | ||
1335 | * | ||
1336 | * io_page_list_p A pointer to an io_page_list. | ||
1337 | * | ||
1338 | * "size based byte swap" - Some functions do size based byte swapping | ||
1339 | * which allows software to access pointers and | ||
1340 | * counters in native form when the processor | ||
1341 | * operates in a different endianness than the | ||
1342 | * IO bus. Size-based byte swapping converts a | ||
1343 | * multi-byte field between big-endian and | ||
1344 | * little-endian format. | ||
1345 | */ | ||
1346 | |||
1347 | #define HV_PCI_MAP_ATTR_READ 0x01 | ||
1348 | #define HV_PCI_MAP_ATTR_WRITE 0x02 | ||
1349 | |||
1350 | #define HV_PCI_DEVICE_BUILD(b,d,f) \ | ||
1351 | ((((b) & 0xff) << 16) | \ | ||
1352 | (((d) & 0x1f) << 11) | \ | ||
1353 | (((f) & 0x07) << 8)) | ||
1354 | |||
1355 | #define HV_PCI_SYNC_FOR_DEVICE 0x01 | ||
1356 | #define HV_PCI_SYNC_FOR_CPU 0x02 | ||
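/* Editor's illustration, not part of the original header: building the
 * pci_device and tsbid values described above.  HV_PCI_DEVICE_BUILD()
 * is the macro defined above; EXAMPLE_PCI_TSBID() is a hypothetical
 * helper that simply follows the documented layout (tsbnum in bits
 * 63:32, tsbindex in bits 31:00).
 */
#ifndef __ASSEMBLY__
#define EXAMPLE_PCI_TSBID(tsbnum, tsbindex) \
	((((unsigned long)(tsbnum)) << 32UL) | ((unsigned long)(tsbindex)))

/* Bus 2, device 3, function 0:
 * 00000000.00000010.00011000.00000000 == 0x00021800
 */
static const unsigned long example_pci_device = HV_PCI_DEVICE_BUILD(2, 3, 0);

/* Entry 5 of io-tsb 0 (tsbnum must be zero in this version of the spec). */
static const unsigned long example_tsbid = EXAMPLE_PCI_TSBID(0, 5);
#endif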
1357 | |||
1358 | /* pci_iommu_map() | ||
1359 | * TRAP: HV_FAST_TRAP | ||
1360 | * FUNCTION: HV_FAST_PCI_IOMMU_MAP | ||
1361 | * ARG0: devhandle | ||
1362 | * ARG1: tsbid | ||
1363 | * ARG2: #ttes | ||
1364 | * ARG3: io_attributes | ||
1365 | * ARG4: io_page_list_p | ||
1366 | * RET0: status | ||
1367 | * RET1: #ttes mapped | ||
1368 | * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex/io_attributes | ||
1369 | * EBADALIGN Improperly aligned real address | ||
1370 | * ENORADDR Invalid real address | ||
1371 | * | ||
1372 | * Create IOMMU mappings in the sun4v device defined by the given | ||
1373 | * devhandle. The mappings are created in the TSB defined by the | ||
1374 | * tsbnum component of the given tsbid. The first mapping is created | ||
1375 | * in the TSB index defined by the tsbindex component of the given tsbid. | ||
1376 | * The call creates up to #ttes mappings, the first one at tsbnum, tsbindex, | ||
1377 | * the second at tsbnum, tsbindex + 1, etc. | ||
1378 | * | ||
1379 | * All mappings are created with the attributes defined by the io_attributes | ||
1380 | * argument. The page mapping addresses are described in the io_page_list | ||
1381 | * defined by the given io_page_list_p, which is a pointer to the io_page_list. | ||
1382 | * The first entry in the io_page_list is the address for the first iotte, the | ||
1383 | * 2nd for the 2nd iotte, and so on. | ||
1384 | * | ||
1385 | * Each io_page_address in the io_page_list must be appropriately aligned. | ||
1386 | * #ttes must be greater than zero. For this version of the spec, the tsbnum | ||
1387 | * component of the given tsbid must be zero. | ||
1388 | * | ||
1389 | * Returns the actual number of mappings created, which may be less than | ||
1390 | * or equal to the argument #ttes. If the function returns a value which | ||
1391 | * is less than the #ttes, the caller may continue to call the function with | ||
1392 | * updated tsbid, #ttes, and io_page_list_p arguments until all pages are | ||
1393 | * mapped. | ||
1394 | * | ||
1395 | * Note: This function does not imply an iotte cache flush. The guest must | ||
1396 | * demap an entry before re-mapping it. | ||
1397 | */ | ||
1398 | #define HV_FAST_PCI_IOMMU_MAP 0xb0 | ||
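/* Editor's illustration, not part of the original header: because
 * pci_iommu_map() may map fewer entries than requested, callers loop
 * until everything is mapped.  The example_pci_iommu_map() wrapper is
 * hypothetical; it is assumed to return the %o0 status and to store
 * RET1 (#ttes mapped) through its final argument.
 */
#ifndef __ASSEMBLY__
static inline long example_iommu_map_all(unsigned long devhandle,
					 unsigned long tsbid,
					 unsigned long num_ttes,
					 unsigned long io_attributes,
					 unsigned long io_page_list_pa)
{
	while (num_ttes) {
		unsigned long mapped;
		long status = example_pci_iommu_map(devhandle, tsbid,	/* hypothetical */
						    num_ttes, io_attributes,
						    io_page_list_pa, &mapped);
		if (status)
			return status;

		/* tsbindex occupies the low 32 bits of tsbid, and the page
		 * list is an array of 64-bit real addresses, so advance
		 * both past the entries just mapped.
		 */
		tsbid += mapped;
		io_page_list_pa += mapped * sizeof(unsigned long);
		num_ttes -= mapped;
	}
	return 0;
}
#endif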
1399 | |||
1400 | /* pci_iommu_demap() | ||
1401 | * TRAP: HV_FAST_TRAP | ||
1402 | * FUNCTION: HV_FAST_PCI_IOMMU_DEMAP | ||
1403 | * ARG0: devhandle | ||
1404 | * ARG1: tsbid | ||
1405 | * ARG2: #ttes | ||
1406 | * RET0: status | ||
1407 | * RET1: #ttes demapped | ||
1408 | * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex | ||
1409 | * | ||
1410 | * Demap and flush IOMMU mappings in the device defined by the given | ||
1411 | * devhandle. Demaps up to #ttes entries in the TSB defined by the tsbnum | ||
1412 | * component of the given tsbid, starting at the TSB index defined by the | ||
1413 | * tsbindex component of the given tsbid. | ||
1414 | * | ||
1415 | * For this version of the spec, the tsbnum of the given tsbid must be zero. | ||
1416 | * #ttes must be greater than zero. | ||
1417 | * | ||
1418 | * Returns the actual number of ttes demapped, which may be less than or equal | ||
1419 | * to the argument #ttes. If #ttes demapped is less than #ttes, the caller | ||
1420 | * may continue to call this function with updated tsbid and #ttes arguments | ||
1421 | * until all pages are demapped. | ||
1422 | * | ||
1423 | * Note: Entries do not have to be mapped to be demapped. A demap of an | ||
1424 | * unmapped page will flush the entry from the tte cache. | ||
1425 | */ | ||
1426 | #define HV_FAST_PCI_IOMMU_DEMAP 0xb1 | ||
1427 | |||
1428 | /* pci_iommu_getmap() | ||
1429 | * TRAP: HV_FAST_TRAP | ||
1430 | * FUNCTION: HV_FAST_PCI_IOMMU_GETMAP | ||
1431 | * ARG0: devhandle | ||
1432 | * ARG1: tsbid | ||
1433 | * RET0: status | ||
1434 | * RET1: io_attributes | ||
1435 | * RET2: real address | ||
1436 | * ERRORS: EINVAL Invalid devhandle/tsbnum/tsbindex | ||
1437 | * ENOMAP Mapping is not valid, no translation exists | ||
1438 | * | ||
1439 | * Read and return the mapping in the device described by the given devhandle | ||
1440 | * and tsbid. If successful, the io_attributes shall be returned in RET1 | ||
1441 | * and the page address of the mapping shall be returned in RET2. | ||
1442 | * | ||
1443 | * For this version of the spec, the tsbnum component of the given tsbid | ||
1444 | * must be zero. | ||
1445 | */ | ||
1446 | #define HV_FAST_PCI_IOMMU_GETMAP 0xb2 | ||
1447 | |||
1448 | /* pci_iommu_getbypass() | ||
1449 | * TRAP: HV_FAST_TRAP | ||
1450 | * FUNCTION: HV_FAST_PCI_IOMMU_GETBYPASS | ||
1451 | * ARG0: devhandle | ||
1452 | * ARG1: real address | ||
1453 | * ARG2: io_attributes | ||
1454 | * RET0: status | ||
1455 | * RET1: io_addr | ||
1456 | * ERRORS: EINVAL Invalid devhandle/io_attributes | ||
1457 | * ENORADDR Invalid real address | ||
1458 | * ENOTSUPPORTED Function not supported in this implementation. | ||
1459 | * | ||
1460 | * Create a "special" mapping in the device described by the given devhandle, | ||
1461 | * for the given real address and attributes. Return the IO address in RET1 | ||
1462 | * if successful. | ||
1463 | */ | ||
1464 | #define HV_FAST_PCI_IOMMU_GETBYPASS 0xb3 | ||
1465 | |||
1466 | /* pci_config_get() | ||
1467 | * TRAP: HV_FAST_TRAP | ||
1468 | * FUNCTION: HV_FAST_PCI_CONFIG_GET | ||
1469 | * ARG0: devhandle | ||
1470 | * ARG1: pci_device | ||
1471 | * ARG2: pci_config_offset | ||
1472 | * ARG3: size | ||
1473 | * RET0: status | ||
1474 | * RET1: error_flag | ||
1475 | * RET2: data | ||
1476 | * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size | ||
1477 | * EBADALIGN pci_config_offset not size aligned | ||
1478 | * ENOACCESS Access to this offset is not permitted | ||
1479 | * | ||
1480 | * Read PCI configuration space for the adapter described by the given | ||
1481 | * devhandle. Read size (1, 2, or 4) bytes of data from the given | ||
1482 | * pci_device, at pci_config_offset from the beginning of the device's | ||
1483 | * configuration space. If there was no error, RET1 is set to zero and | ||
1484 | * RET2 is set to the data read. Insignificant bits in RET2 are not | ||
1485 | * guaranteed to have any specific value and therefore must be ignored. | ||
1486 | * | ||
1487 | * The data returned in RET2 is size based byte swapped. | ||
1488 | * | ||
1489 | * If an error occurs during the read, set RET1 to a non-zero value. The | ||
1490 | * given pci_config_offset must be 'size' aligned. | ||
1491 | */ | ||
1492 | #define HV_FAST_PCI_CONFIG_GET 0xb4 | ||
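/* Editor's illustration, not part of the original header: reading the
 * 32-bit register at configuration space offset 0 (vendor and device ID)
 * of a device.  The example_pci_config_get() wrapper is hypothetical; it
 * is assumed to return the %o0 status and to store error_flag and data
 * through its pointer arguments.
 */
#ifndef __ASSEMBLY__
static inline long example_read_vendor_device_id(unsigned long devhandle,
						 unsigned long pci_device,
						 unsigned int *id)
{
	unsigned long error_flag, data;
	long status = example_pci_config_get(devhandle, pci_device,	/* hypothetical */
					     0 /* offset */, 4 /* size */,
					     &error_flag, &data);

	if (status || error_flag)
		return -1;
	*id = (unsigned int) data;	/* ignore insignificant upper bits */
	return 0;
}
#endif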
1493 | |||
1494 | /* pci_config_put() | ||
1495 | * TRAP: HV_FAST_TRAP | ||
1496 | * FUNCTION: HV_FAST_PCI_CONFIG_PUT | ||
1497 | * ARG0: devhandle | ||
1498 | * ARG1: pci_device | ||
1499 | * ARG2: pci_config_offset | ||
1500 | * ARG3: size | ||
1501 | * ARG4: data | ||
1502 | * RET0: status | ||
1503 | * RET1: error_flag | ||
1504 | * ERRORS: EINVAL Invalid devhandle/pci_device/offset/size | ||
1505 | * EBADALIGN pci_config_offset not size aligned | ||
1506 | * ENOACCESS Access to this offset is not permitted | ||
1507 | * | ||
1508 | * Write PCI configuration space for the adapter described by the given | ||
1509 | * devhandle. Write size (1, 2, or 4) bytes of data in a single operation, | ||
1510 | * at pci_config_offset from the beginning of the device's configuration | ||
1511 | * space. The data argument contains the data to be written to configuration | ||
1512 | * space. Prior to writing, the data is size based byte swapped. | ||
1513 | * | ||
1514 | * If an error occurs during the write access, do not generate an error | ||
1515 | * report, but set RET1 to a non-zero value. Otherwise RET1 is zero. | ||
1516 | * The given pci_config_offset must be 'size' aligned. | ||
1517 | * | ||
1518 | * This function is permitted to read from offset zero in the configuration | ||
1519 | * space described by the given pci_device if necessary to ensure that the | ||
1520 | * write access to config space completes. | ||
1521 | */ | ||
1522 | #define HV_FAST_PCI_CONFIG_PUT 0xb5 | ||
1523 | |||
1524 | /* pci_peek() | ||
1525 | * TRAP: HV_FAST_TRAP | ||
1526 | * FUNCTION: HV_FAST_PCI_PEEK | ||
1527 | * ARG0: devhandle | ||
1528 | * ARG1: real address | ||
1529 | * ARG2: size | ||
1530 | * RET0: status | ||
1531 | * RET1: error_flag | ||
1532 | * RET2: data | ||
1533 | * ERRORS: EINVAL Invalid devhandle or size | ||
1534 | * EBADALIGN Improperly aligned real address | ||
1535 | * ENORADDR Bad real address | ||
1536 | * ENOACCESS Guest access prohibited | ||
1537 | * | ||
1538 | * Attempt to read the IO address given by the given devhandle, real address, | ||
1539 | * and size. Size must be 1, 2, 4, or 8. The read is performed as a single | ||
1540 | * access operation using the given size. If an error occurs when reading | ||
1541 | * from the given location, do not generate an error report, but return a | ||
1542 | * non-zero value in RET1. If the read was successful, return zero in RET1 | ||
1543 | * and return the actual data read in RET2. The data returned is size based | ||
1544 | * byte swapped. | ||
1545 | * | ||
1546 | * Non-significant bits in RET2 are not guaranteed to have any specific value | ||
1547 | * and therefore must be ignored. If RET1 is returned as non-zero, the data | ||
1548 | * value is not guaranteed to have any specific value and should be ignored. | ||
1549 | * | ||
1550 | * The caller must have permission to read from the given devhandle, real | ||
1551 | * address, which must be an IO address. The argument real address must be a | ||
1552 | * size aligned address. | ||
1553 | * | ||
1554 | * The hypervisor implementation of this function must block access to any | ||
1555 | * IO address that the guest does not have explicit permission to access. | ||
1556 | */ | ||
1557 | #define HV_FAST_PCI_PEEK 0xb6 | ||
1558 | |||
1559 | /* pci_poke() | ||
1560 | * TRAP: HV_FAST_TRAP | ||
1561 | * FUNCTION: HV_FAST_PCI_POKE | ||
1562 | * ARG0: devhandle | ||
1563 | * ARG1: real address | ||
1564 | * ARG2: size | ||
1565 | * ARG3: data | ||
1566 | * ARG4: pci_device | ||
1567 | * RET0: status | ||
1568 | * RET1: error_flag | ||
1569 | * ERRORS: EINVAL Invalid devhandle, size, or pci_device | ||
1570 | * EBADALIGN Improperly aligned real address | ||
1571 | * ENORADDR Bad real address | ||
1572 | * ENOACCESS Guest access prohibited | ||
1573 | * ENOTSUPPORTED Function is not supported by implementation | ||
1574 | * | ||
1575 | * Attempt to write data to the IO address given by the given devhandle, | ||
1576 | * real address, and size. Size must be 1, 2, 4, or 8. The write is | ||
1577 | * performed as a single access operation using the given size. Prior to | ||
1578 | * writing, the data is size based byte swapped. | ||
1579 | * | ||
1580 | * If an error occurs when writing to the given location, do not generate an | ||
1581 | * error report, but return a non-zero value in RET1. If the write was | ||
1582 | * successful, return zero in RET1. | ||
1583 | * | ||
1584 | * pci_device describes the configuration address of the device being | ||
1585 | * written to. The implementation may safely read from offset 0 within | ||
1586 | * the configuration space of the device described by devhandle and | ||
1587 | * pci_device in order to guarantee that the write portion of the operation | ||
1588 | * completes. | ||
1589 | * | ||
1590 | * Any error that occurs due to the read shall be reported using the normal | ||
1591 | * error reporting mechanisms; the read error is not suppressed. | ||
1592 | * | ||
1593 | * The caller must have permission to write to the given devhandle, real | ||
1594 | * address, which must be an IO address. The argument real address must be a | ||
1595 | * size aligned address. The caller must have permission to read from | ||
1596 | * the given devhandle, pci_device configuration space offset 0. | ||
1597 | * | ||
1598 | * The hypervisor implementation of this function must block access to any | ||
1599 | * IO address that the guest does not have explicit permission to access. | ||
1600 | */ | ||
1601 | #define HV_FAST_PCI_POKE 0xb7 | ||
1602 | |||
1603 | /* pci_dma_sync() | ||
1604 | * TRAP: HV_FAST_TRAP | ||
1605 | * FUNCTION: HV_FAST_PCI_DMA_SYNC | ||
1606 | * ARG0: devhandle | ||
1607 | * ARG1: real address | ||
1608 | * ARG2: size | ||
1609 | * ARG3: io_sync_direction | ||
1610 | * RET0: status | ||
1611 | * RET1: #synced | ||
1612 | * ERRORS: EINVAL Invalid devhandle or io_sync_direction | ||
1613 | * ENORADDR Bad real address | ||
1614 | * | ||
1615 | * Synchronize a memory region described by the given real address and size, | ||
1616 | * for the device defined by the given devhandle using the direction(s) | ||
1617 | * defined by the given io_sync_direction. The argument size is the size of | ||
1618 | * the memory region in bytes. | ||
1619 | * | ||
1620 | * Return the actual number of bytes synchronized in the return value #synced, | ||
1621 | * which may be less than or equal to the argument size. If the return | ||
1622 | * value #synced is less than size, the caller must continue to call this | ||
1623 | * function with updated real address and size arguments until the entire | ||
1624 | * memory region is synchronized. | ||
1625 | */ | ||
1626 | #define HV_FAST_PCI_DMA_SYNC 0xb8 | ||
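/* Editor's illustration, not part of the original header: pci_dma_sync()
 * may sync fewer bytes than requested, so callers loop until the whole
 * region is covered.  The example_pci_dma_sync() wrapper is hypothetical;
 * it is assumed to return the %o0 status and to store RET1 (#synced)
 * through its final argument.
 */
#ifndef __ASSEMBLY__
static inline long example_dma_sync_all(unsigned long devhandle,
					unsigned long r_addr,
					unsigned long size,
					unsigned long io_sync_direction)
{
	while (size) {
		unsigned long synced;
		long status = example_pci_dma_sync(devhandle, r_addr, size,	/* hypothetical */
						   io_sync_direction, &synced);
		if (status)
			return status;
		r_addr += synced;
		size -= synced;
	}
	return 0;
}
#endif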
1627 | |||
1628 | /* PCI MSI services. */ | ||
1629 | |||
1630 | #define HV_MSITYPE_MSI32 0x00 | ||
1631 | #define HV_MSITYPE_MSI64 0x01 | ||
1632 | |||
1633 | #define HV_MSIQSTATE_IDLE 0x00 | ||
1634 | #define HV_MSIQSTATE_ERROR 0x01 | ||
1635 | |||
1636 | #define HV_MSIQ_INVALID 0x00 | ||
1637 | #define HV_MSIQ_VALID 0x01 | ||
1638 | |||
1639 | #define HV_MSISTATE_IDLE 0x00 | ||
1640 | #define HV_MSISTATE_DELIVERED 0x01 | ||
1641 | |||
1642 | #define HV_MSIVALID_INVALID 0x00 | ||
1643 | #define HV_MSIVALID_VALID 0x01 | ||
1644 | |||
1645 | #define HV_PCIE_MSGTYPE_PME_MSG 0x18 | ||
1646 | #define HV_PCIE_MSGTYPE_PME_ACK_MSG 0x1b | ||
1647 | #define HV_PCIE_MSGTYPE_CORR_MSG 0x30 | ||
1648 | #define HV_PCIE_MSGTYPE_NONFATAL_MSG 0x31 | ||
1649 | #define HV_PCIE_MSGTYPE_FATAL_MSG 0x33 | ||
1650 | |||
1651 | #define HV_MSG_INVALID 0x00 | ||
1652 | #define HV_MSG_VALID 0x01 | ||
1653 | |||
1654 | /* pci_msiq_conf() | ||
1655 | * TRAP: HV_FAST_TRAP | ||
1656 | * FUNCTION: HV_FAST_PCI_MSIQ_CONF | ||
1657 | * ARG0: devhandle | ||
1658 | * ARG1: msiqid | ||
1659 | * ARG2: real address | ||
1660 | * ARG3: number of entries | ||
1661 | * RET0: status | ||
1662 | * ERRORS: EINVAL Invalid devhandle, msiqid or nentries | ||
1663 | * EBADALIGN Improperly aligned real address | ||
1664 | * ENORADDR Bad real address | ||
1665 | * | ||
1666 | * Configure the MSI queue given by the devhandle and msiqid arguments | ||
1667 | * to be placed at the given real address and to have the given | ||
1668 | * number of entries. The real address must be aligned exactly to match | ||
1669 | * the queue size. Each queue entry is 64 bytes long, so for example a 32 entry | ||
1670 | * queue must be aligned on a 2048 byte real address boundary. The MSI-EQ | ||
1671 | * Head and Tail are initialized so that the MSI-EQ is 'empty'. | ||
1672 | * | ||
1673 | * Implementation Note: Certain implementations have fixed sized queues. In | ||
1674 | * that case, number of entries must contain the correct | ||
1675 | * value. | ||
1676 | */ | ||
1677 | #define HV_FAST_PCI_MSIQ_CONF 0xc0 | ||
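/* Editor's illustration, not part of the original header: the base address
 * alignment for pci_msiq_conf() follows from the 64-byte entry size, e.g. a
 * 32-entry queue occupies 32 * 64 = 2048 bytes and must sit on a 2048-byte
 * real address boundary.  This check assumes the entry count is a power of
 * two.
 */
#ifndef __ASSEMBLY__
static inline int example_msiq_base_ok(unsigned long q_real_addr,
				       unsigned long num_entries)
{
	unsigned long q_size = num_entries * 64UL;	/* 64 bytes per entry */

	return (q_real_addr & (q_size - 1UL)) == 0UL;
}
#endif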
1678 | |||
1679 | /* pci_msiq_info() | ||
1680 | * TRAP: HV_FAST_TRAP | ||
1681 | * FUNCTION: HV_FAST_PCI_MSIQ_INFO | ||
1682 | * ARG0: devhandle | ||
1683 | * ARG1: msiqid | ||
1684 | * RET0: status | ||
1685 | * RET1: real address | ||
1686 | * RET2: number of entries | ||
1687 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1688 | * | ||
1689 | * Return the configuration information for the MSI queue described | ||
1690 | * by the given devhandle and msiqid. The base address of the queue | ||
1691 | * is returned in RET1 and the number of entries is returned in RET2. | ||
1692 | * If the queue is unconfigured, the real address is undefined and the | ||
1693 | * number of entries will be returned as zero. | ||
1694 | */ | ||
1695 | #define HV_FAST_PCI_MSIQ_INFO 0xc1 | ||
1696 | |||
1697 | /* pci_msiq_getvalid() | ||
1698 | * TRAP: HV_FAST_TRAP | ||
1699 | * FUNCTION: HV_FAST_PCI_MSIQ_GETVALID | ||
1700 | * ARG0: devhandle | ||
1701 | * ARG1: msiqid | ||
1702 | * RET0: status | ||
1703 | * RET1: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID) | ||
1704 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1705 | * | ||
1706 | * Get the valid state of the MSI-EQ described by the given devhandle and | ||
1707 | * msiqid. | ||
1708 | */ | ||
1709 | #define HV_FAST_PCI_MSIQ_GETVALID 0xc2 | ||
1710 | |||
1711 | /* pci_msiq_setvalid() | ||
1712 | * TRAP: HV_FAST_TRAP | ||
1713 | * FUNCTION: HV_FAST_PCI_MSIQ_SETVALID | ||
1714 | * ARG0: devhandle | ||
1715 | * ARG1: msiqid | ||
1716 | * ARG2: msiqvalid (HV_MSIQ_VALID or HV_MSIQ_INVALID) | ||
1717 | * RET0: status | ||
1718 | * ERRORS: EINVAL Invalid devhandle or msiqid or msiqvalid | ||
1719 | * value or MSI EQ is uninitialized | ||
1720 | * | ||
1721 | * Set the valid state of the MSI-EQ described by the given devhandle and | ||
1722 | * msiqid to the given msiqvalid. | ||
1723 | */ | ||
1724 | #define HV_FAST_PCI_MSIQ_SETVALID 0xc3 | ||
1725 | |||
1726 | /* pci_msiq_getstate() | ||
1727 | * TRAP: HV_FAST_TRAP | ||
1728 | * FUNCTION: HV_FAST_PCI_MSIQ_GETSTATE | ||
1729 | * ARG0: devhandle | ||
1730 | * ARG1: msiqid | ||
1731 | * RET0: status | ||
1732 | * RET1: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR) | ||
1733 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1734 | * | ||
1735 | * Get the state of the MSI-EQ described by the given devhandle and | ||
1736 | * msiqid. | ||
1737 | */ | ||
1738 | #define HV_FAST_PCI_MSIQ_GETSTATE 0xc4 | ||
1739 | |||
1740 | /* pci_msiq_setstate() | ||
1741 | * TRAP: HV_FAST_TRAP | ||
1742 | * FUNCTION: HV_FAST_PCI_MSIQ_SETSTATE | ||
1743 | * ARG0: devhandle | ||
1744 | * ARG1: msiqid | ||
1745 | * ARG2: msiqstate (HV_MSIQSTATE_IDLE or HV_MSIQSTATE_ERROR) | ||
1746 | * RET0: status | ||
1747 | * ERRORS: EINVAL Invalid devhandle or msiqid or msiqstate | ||
1748 | * value or MSI EQ is uninitialized | ||
1749 | * | ||
1750 | * Set the state of the MSI-EQ described by the given devhandle and | ||
1751 | * msiqid to the given msiqstate. | ||
1752 | */ | ||
1753 | #define HV_FAST_PCI_MSIQ_SETSTATE 0xc5 | ||
1754 | |||
1755 | /* pci_msiq_gethead() | ||
1756 | * TRAP: HV_FAST_TRAP | ||
1757 | * FUNCTION: HV_FAST_PCI_MSIQ_GETHEAD | ||
1758 | * ARG0: devhandle | ||
1759 | * ARG1: msiqid | ||
1760 | * RET0: status | ||
1761 | * RET1: msiqhead | ||
1762 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1763 | * | ||
1764 | * Get the current MSI EQ queue head for the MSI-EQ described by the | ||
1765 | * given devhandle and msiqid. | ||
1766 | */ | ||
1767 | #define HV_FAST_PCI_MSIQ_GETHEAD 0xc6 | ||
1768 | |||
1769 | /* pci_msiq_sethead() | ||
1770 | * TRAP: HV_FAST_TRAP | ||
1771 | * FUNCTION: HV_FAST_PCI_MSIQ_SETHEAD | ||
1772 | * ARG0: devhandle | ||
1773 | * ARG1: msiqid | ||
1774 | * ARG2: msiqhead | ||
1775 | * RET0: status | ||
1776 | * ERRORS: EINVAL Invalid devhandle or msiqid or msiqhead, | ||
1777 | * or MSI EQ is uninitialized | ||
1778 | * | ||
1779 | * Set the current MSI EQ queue head for the MSI-EQ described by the | ||
1780 | * given devhandle and msiqid. | ||
1781 | */ | ||
1782 | #define HV_FAST_PCI_MSIQ_SETHEAD 0xc7 | ||
1783 | |||
1784 | /* pci_msiq_gettail() | ||
1785 | * TRAP: HV_FAST_TRAP | ||
1786 | * FUNCTION: HV_FAST_PCI_MSIQ_GETTAIL | ||
1787 | * ARG0: devhandle | ||
1788 | * ARG1: msiqid | ||
1789 | * RET0: status | ||
1790 | * RET1: msiqtail | ||
1791 | * ERRORS: EINVAL Invalid devhandle or msiqid | ||
1792 | * | ||
1793 | * Get the current MSI EQ queue tail for the MSI-EQ described by the | ||
1794 | * given devhandle and msiqid. | ||
1795 | */ | ||
1796 | #define HV_FAST_PCI_MSIQ_GETTAIL 0xc8 | ||
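/* Editor's illustration, not part of the original header: a typical MSI-EQ
 * servicing loop built on the head/tail services above.  The
 * example_pci_msiq_*() wrappers and example_handle_entry() are hypothetical,
 * and the sketch assumes the head/tail values are byte offsets into the
 * 64-byte-per-entry queue.
 */
#ifndef __ASSEMBLY__
static inline long example_drain_msiq(unsigned long devhandle,
				      unsigned long msiqid,
				      void *q_va, unsigned long num_entries)
{
	unsigned long head, tail;

	if (example_pci_msiq_gethead(devhandle, msiqid, &head) ||	/* hypothetical */
	    example_pci_msiq_gettail(devhandle, msiqid, &tail))	/* hypothetical */
		return -1;

	while (head != tail) {
		example_handle_entry((char *)q_va + head);	/* hypothetical */
		head += 64;
		if (head == num_entries * 64)
			head = 0;	/* wrap to the start of the queue */
	}

	/* Tell the hypervisor which entries have been consumed. */
	return example_pci_msiq_sethead(devhandle, msiqid, head);	/* hypothetical */
}
#endif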
1797 | |||
1798 | /* pci_msi_getvalid() | ||
1799 | * TRAP: HV_FAST_TRAP | ||
1800 | * FUNCTION: HV_FAST_PCI_MSI_GETVALID | ||
1801 | * ARG0: devhandle | ||
1802 | * ARG1: msinum | ||
1803 | * RET0: status | ||
1804 | * RET1: msivalidstate | ||
1805 | * ERRORS: EINVAL Invalid devhandle or msinum | ||
1806 | * | ||
1807 | * Get the current valid/enabled state for the MSI defined by the | ||
1808 | * given devhandle and msinum. | ||
1809 | */ | ||
1810 | #define HV_FAST_PCI_MSI_GETVALID 0xc9 | ||
1811 | |||
1812 | /* pci_msi_setvalid() | ||
1813 | * TRAP: HV_FAST_TRAP | ||
1814 | * FUNCTION: HV_FAST_PCI_MSI_SETVALID | ||
1815 | * ARG0: devhandle | ||
1816 | * ARG1: msinum | ||
1817 | * ARG2: msivalidstate | ||
1818 | * RET0: status | ||
1819 | * ERRORS: EINVAL Invalid devhandle or msinum or msivalidstate | ||
1820 | * | ||
1821 | * Set the current valid/enabled state for the MSI defined by the | ||
1822 | * given devhandle and msinum. | ||
1823 | */ | ||
1824 | #define HV_FAST_PCI_MSI_SETVALID 0xca | ||
1825 | |||
1826 | /* pci_msi_getmsiq() | ||
1827 | * TRAP: HV_FAST_TRAP | ||
1828 | * FUNCTION: HV_FAST_PCI_MSI_GETMSIQ | ||
1829 | * ARG0: devhandle | ||
1830 | * ARG1: msinum | ||
1831 | * RET0: status | ||
1832 | * RET1: msiqid | ||
1833 | * ERRORS: EINVAL Invalid devhandle or msinum or MSI is unbound | ||
1834 | * | ||
1835 | * Get the MSI EQ that the MSI defined by the given devhandle and | ||
1836 | * msinum is bound to. | ||
1837 | */ | ||
1838 | #define HV_FAST_PCI_MSI_GETMSIQ 0xcb | ||
1839 | |||
1840 | /* pci_msi_setmsiq() | ||
1841 | * TRAP: HV_FAST_TRAP | ||
1842 | * FUNCTION: HV_FAST_PCI_MSI_SETMSIQ | ||
1843 | * ARG0: devhandle | ||
1844 | * ARG1: msinum | ||
1845 | * ARG2: msitype | ||
1846 | * ARG3: msiqid | ||
1847 | * RET0: status | ||
1848 | * ERRORS: EINVAL Invalid devhandle or msinum or msiqid | ||
1849 | * | ||
1850 | * Set the MSI EQ that the MSI defined by the given devhandle and | ||
1851 | * msinum is bound to. | ||
1852 | */ | ||
1853 | #define HV_FAST_PCI_MSI_SETMSIQ 0xcc | ||
1854 | |||
1855 | /* pci_msi_getstate() | ||
1856 | * TRAP: HV_FAST_TRAP | ||
1857 | * FUNCTION: HV_FAST_PCI_MSI_GETSTATE | ||
1858 | * ARG0: devhandle | ||
1859 | * ARG1: msinum | ||
1860 | * RET0: status | ||
1861 | * RET1: msistate | ||
1862 | * ERRORS: EINVAL Invalid devhandle or msinum | ||
1863 | * | ||
1864 | * Get the state of the MSI defined by the given devhandle and msinum. | ||
1865 | * If not initialized, return HV_MSISTATE_IDLE. | ||
1866 | */ | ||
1867 | #define HV_FAST_PCI_MSI_GETSTATE 0xcd | ||
1868 | |||
1869 | /* pci_msi_setstate() | ||
1870 | * TRAP: HV_FAST_TRAP | ||
1871 | * FUNCTION: HV_FAST_PCI_MSI_SETSTATE | ||
1872 | * ARG0: devhandle | ||
1873 | * ARG1: msinum | ||
1874 | * ARG2: msistate | ||
1875 | * RET0: status | ||
1876 | * ERRORS: EINVAL Invalid devhandle or msinum or msistate | ||
1877 | * | ||
1878 | * Set the state of the MSI defined by the given devhandle and msinum. | ||
1879 | */ | ||
1880 | #define HV_FAST_PCI_MSI_SETSTATE 0xce | ||
1881 | |||
1882 | /* pci_msg_getmsiq() | ||
1883 | * TRAP: HV_FAST_TRAP | ||
1884 | * FUNCTION: HV_FAST_PCI_MSG_GETMSIQ | ||
1885 | * ARG0: devhandle | ||
1886 | * ARG1: msgtype | ||
1887 | * RET0: status | ||
1888 | * RET1: msiqid | ||
1889 | * ERRORS: EINVAL Invalid devhandle or msgtype | ||
1890 | * | ||
1891 | * Get the MSI EQ of the MSG defined by the given devhandle and msgtype. | ||
1892 | */ | ||
1893 | #define HV_FAST_PCI_MSG_GETMSIQ 0xd0 | ||
1894 | |||
1895 | /* pci_msg_setmsiq() | ||
1896 | * TRAP: HV_FAST_TRAP | ||
1897 | * FUNCTION: HV_FAST_PCI_MSG_SETMSIQ | ||
1898 | * ARG0: devhandle | ||
1899 | * ARG1: msgtype | ||
1900 | * ARG2: msiqid | ||
1901 | * RET0: status | ||
1902 | * ERRORS: EINVAL Invalid devhandle, msgtype, or msiqid | ||
1903 | * | ||
1904 | * Set the MSI EQ of the MSG defined by the given devhandle and msgtype. | ||
1905 | */ | ||
1906 | #define HV_FAST_PCI_MSG_SETMSIQ 0xd1 | ||
1907 | |||
1908 | /* pci_msg_getvalid() | ||
1909 | * TRAP: HV_FAST_TRAP | ||
1910 | * FUNCTION: HV_FAST_PCI_MSG_GETVALID | ||
1911 | * ARG0: devhandle | ||
1912 | * ARG1: msgtype | ||
1913 | * RET0: status | ||
1914 | * RET1: msgvalidstate | ||
1915 | * ERRORS: EINVAL Invalid devhandle or msgtype | ||
1916 | * | ||
1917 | * Get the valid/enabled state of the MSG defined by the given | ||
1918 | * devhandle and msgtype. | ||
1919 | */ | ||
1920 | #define HV_FAST_PCI_MSG_GETVALID 0xd2 | ||
1921 | |||
1922 | /* pci_msg_setvalid() | ||
1923 | * TRAP: HV_FAST_TRAP | ||
1924 | * FUNCTION: HV_FAST_PCI_MSG_SETVALID | ||
1925 | * ARG0: devhandle | ||
1926 | * ARG1: msgtype | ||
1927 | * ARG2: msgvalidstate | ||
1928 | * RET0: status | ||
1929 | * ERRORS: EINVAL Invalid devhandle or msgtype or msgvalidstate | ||
1930 | * | ||
1931 | * Set the valid/enabled state of the MSG defined by the given | ||
1932 | * devhandle and msgtype. | ||
1933 | */ | ||
1934 | #define HV_FAST_PCI_MSG_SETVALID 0xd3 | ||
1935 | |||
1936 | /* Performance counter services. */ | ||
1937 | |||
1938 | #define HV_PERF_JBUS_PERF_CTRL_REG 0x00 | ||
1939 | #define HV_PERF_JBUS_PERF_CNT_REG 0x01 | ||
1940 | #define HV_PERF_DRAM_PERF_CTRL_REG_0 0x02 | ||
1941 | #define HV_PERF_DRAM_PERF_CNT_REG_0 0x03 | ||
1942 | #define HV_PERF_DRAM_PERF_CTRL_REG_1 0x04 | ||
1943 | #define HV_PERF_DRAM_PERF_CNT_REG_1 0x05 | ||
1944 | #define HV_PERF_DRAM_PERF_CTRL_REG_2 0x06 | ||
1945 | #define HV_PERF_DRAM_PERF_CNT_REG_2 0x07 | ||
1946 | #define HV_PERF_DRAM_PERF_CTRL_REG_3 0x08 | ||
1947 | #define HV_PERF_DRAM_PERF_CNT_REG_3 0x09 | ||
1948 | |||
1949 | /* get_perfreg() | ||
1950 | * TRAP: HV_FAST_TRAP | ||
1951 | * FUNCTION: HV_FAST_GET_PERFREG | ||
1952 | * ARG0: performance reg number | ||
1953 | * RET0: status | ||
1954 | * RET1: performance reg value | ||
1955 | * ERRORS: EINVAL Invalid performance register number | ||
1956 | * ENOACCESS No access allowed to performance counters | ||
1957 | * | ||
1958 | * Read the value of the given DRAM/JBUS performance counter/control register. | ||
1959 | */ | ||
1960 | #define HV_FAST_GET_PERFREG 0x100 | ||
1961 | |||
1962 | /* set_perfreg() | ||
1963 | * TRAP: HV_FAST_TRAP | ||
1964 | * FUNCTION: HV_FAST_SET_PERFREG | ||
1965 | * ARG0: performance reg number | ||
1966 | * ARG1: performance reg value | ||
1967 | * RET0: status | ||
1968 | * ERRORS: EINVAL Invalid performance register number | ||
1969 | * ENOACCESS No access allowed to performance counters | ||
1970 | * | ||
1971 | * Write the given performance reg value to the given DRAM/JBUS | ||
1972 | * performance counter/control register. | ||
1973 | */ | ||
1974 | #define HV_FAST_SET_PERFREG 0x101 | ||
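/* Editor's illustration, not part of the original header: a
 * read-modify-write of one of the DRAM performance control registers
 * listed above.  The example_get_perfreg()/example_set_perfreg() wrappers
 * are hypothetical; each is assumed to return the %o0 status.
 */
#ifndef __ASSEMBLY__
static inline long example_dram_ctrl0_set_bits(unsigned long bits)
{
	unsigned long val;
	long status = example_get_perfreg(HV_PERF_DRAM_PERF_CTRL_REG_0, &val);	/* hypothetical */

	if (status)
		return status;
	return example_set_perfreg(HV_PERF_DRAM_PERF_CTRL_REG_0, val | bits);	/* hypothetical */
}
#endif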
1975 | |||
1976 | /* MMU statistics services. | ||
1977 | * | ||
1978 | * The hypervisor maintains MMU statistics and privileged code provides | ||
1979 | * a buffer where these statistics can be collected. The buffer is | ||
1980 | * continually updated once configured. The layout is as follows: | ||
1981 | */ | ||
1982 | #ifndef __ASSEMBLY__ | ||
1983 | struct hv_mmu_statistics { | ||
1984 | unsigned long immu_tsb_hits_ctx0_8k_tte; | ||
1985 | unsigned long immu_tsb_ticks_ctx0_8k_tte; | ||
1986 | unsigned long immu_tsb_hits_ctx0_64k_tte; | ||
1987 | unsigned long immu_tsb_ticks_ctx0_64k_tte; | ||
1988 | unsigned long __reserved1[2]; | ||
1989 | unsigned long immu_tsb_hits_ctx0_4mb_tte; | ||
1990 | unsigned long immu_tsb_ticks_ctx0_4mb_tte; | ||
1991 | unsigned long __reserved2[2]; | ||
1992 | unsigned long immu_tsb_hits_ctx0_256mb_tte; | ||
1993 | unsigned long immu_tsb_ticks_ctx0_256mb_tte; | ||
1994 | unsigned long __reserved3[4]; | ||
1995 | unsigned long immu_tsb_hits_ctxnon0_8k_tte; | ||
1996 | unsigned long immu_tsb_ticks_ctxnon0_8k_tte; | ||
1997 | unsigned long immu_tsb_hits_ctxnon0_64k_tte; | ||
1998 | unsigned long immu_tsb_ticks_ctxnon0_64k_tte; | ||
1999 | unsigned long __reserved4[2]; | ||
2000 | unsigned long immu_tsb_hits_ctxnon0_4mb_tte; | ||
2001 | unsigned long immu_tsb_ticks_ctxnon0_4mb_tte; | ||
2002 | unsigned long __reserved5[2]; | ||
2003 | unsigned long immu_tsb_hits_ctxnon0_256mb_tte; | ||
2004 | unsigned long immu_tsb_ticks_ctxnon0_256mb_tte; | ||
2005 | unsigned long __reserved6[4]; | ||
2006 | unsigned long dmmu_tsb_hits_ctx0_8k_tte; | ||
2007 | unsigned long dmmu_tsb_ticks_ctx0_8k_tte; | ||
2008 | unsigned long dmmu_tsb_hits_ctx0_64k_tte; | ||
2009 | unsigned long dmmu_tsb_ticks_ctx0_64k_tte; | ||
2010 | unsigned long __reserved7[2]; | ||
2011 | unsigned long dmmu_tsb_hits_ctx0_4mb_tte; | ||
2012 | unsigned long dmmu_tsb_ticks_ctx0_4mb_tte; | ||
2013 | unsigned long __reserved8[2]; | ||
2014 | unsigned long dmmu_tsb_hits_ctx0_256mb_tte; | ||
2015 | unsigned long dmmu_tsb_ticks_ctx0_256mb_tte; | ||
2016 | unsigned long __reserved9[4]; | ||
2017 | unsigned long dmmu_tsb_hits_ctxnon0_8k_tte; | ||
2018 | unsigned long dmmu_tsb_ticks_ctxnon0_8k_tte; | ||
2019 | unsigned long dmmu_tsb_hits_ctxnon0_64k_tte; | ||
2020 | unsigned long dmmu_tsb_ticks_ctxnon0_64k_tte; | ||
2021 | unsigned long __reserved10[2]; | ||
2022 | unsigned long dmmu_tsb_hits_ctxnon0_4mb_tte; | ||
2023 | unsigned long dmmu_tsb_ticks_ctxnon0_4mb_tte; | ||
2024 | unsigned long __reserved11[2]; | ||
2025 | unsigned long dmmu_tsb_hits_ctxnon0_256mb_tte; | ||
2026 | unsigned long dmmu_tsb_ticks_ctxnon0_256mb_tte; | ||
2027 | unsigned long __reserved12[4]; | ||
2028 | }; | ||
2029 | #endif | ||
2030 | |||
2031 | /* mmustat_conf() | ||
2032 | * TRAP: HV_FAST_TRAP | ||
2033 | * FUNCTION: HV_FAST_MMUSTAT_CONF | ||
2034 | * ARG0: real address | ||
2035 | * RET0: status | ||
2036 | * RET1: real address | ||
2037 | * ERRORS: ENORADDR Invalid real address | ||
2038 | * EBADALIGN Real address not aligned on 64-byte boundary | ||
2039 | * EBADTRAP API not supported on this processor | ||
2040 | * | ||
2041 | * Enable MMU statistic gathering using the buffer at the given real | ||
2042 | * address on the current virtual CPU. The new buffer real address | ||
2043 | * is given in ARG0, and the previously specified buffer real address | ||
2044 | * is returned in RET1, or is returned as zero for the first invocation. | ||
2045 | * | ||
2046 | * If the passed in real address argument is zero, this will disable | ||
2047 | * MMU statistic collection on the current virtual CPU. If an error is | ||
2048 | * returned then no statistics are collected. | ||
2049 | * | ||
2050 | * The buffer contents should be initialized to all zeros before being | ||
2051 | * given to the hypervisor or else the statistics will be meaningless. | ||
2052 | */ | ||
2053 | #define HV_FAST_MMUSTAT_CONF 0x102 | ||
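/* Editor's illustration, not part of the original header: enabling MMU
 * statistic collection on the current virtual CPU.  The buffer must be
 * zeroed and its real address 64-byte aligned before being handed to the
 * hypervisor.  The example_mmustat_conf() wrapper and the
 * example_virt_to_real() conversion are hypothetical placeholders.
 */
#ifndef __ASSEMBLY__
static struct hv_mmu_statistics example_mmu_stats __attribute__((aligned(64)));

static inline long example_enable_mmu_stats(void)
{
	unsigned long prev_ra;

	/* Static storage is zero-initialized, satisfying the requirement
	 * that the buffer start out as all zeros.
	 */
	return example_mmustat_conf(example_virt_to_real(&example_mmu_stats),	/* hypothetical */
				    &prev_ra);
}
#endif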
2054 | |||
2055 | /* mmustat_info() | ||
2056 | * TRAP: HV_FAST_TRAP | ||
2057 | * FUNCTION: HV_FAST_MMUSTAT_INFO | ||
2058 | * RET0: status | ||
2059 | * RET1: real address | ||
2060 | * ERRORS: EBADTRAP API not supported on this processor | ||
2061 | * | ||
2062 | * Return the current state and real address of the currently configured | ||
2063 | * MMU statistics buffer on the current virtual CPU. | ||
2064 | */ | ||
2065 | #define HV_FAST_MMUSTAT_INFO 0x103 | ||
2066 | |||
2067 | /* Function numbers for HV_CORE_TRAP. */ | ||
2068 | #define HV_CORE_VER 0x00 | ||
2069 | #define HV_CORE_PUTCHAR 0x01 | ||
2070 | #define HV_CORE_EXIT 0x02 | ||
2071 | |||
2072 | #endif /* !(_SPARC64_HYPERVISOR_H) */ | ||