author     George Zhang <georgezhang@vmware.com>  2013-01-08 18:55:59 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-01-08 19:15:57 -0500
commit     20259849bb1ac1ffb0156eb359810e8b99cb644d (patch)
tree       f992fdf9979ddd38cd620240a19cc0e013437fd8 /include/linux/vmw_vmci_defs.h
parent     8bf503991f87e32ea42a7bd69b79ba084fddc5d7 (diff)
VMCI: Some header and config files.

VMCI head config patch: adds all the necessary files to enable building
of the VMCI module with the Linux Makefiles and Kconfig systems. Also
adds the header files used for building modules against the driver.

Signed-off-by: George Zhang <georgezhang@vmware.com>
Acked-by: Andy King <acking@vmware.com>
Acked-by: Dmitry Torokhov <dtor@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include/linux/vmw_vmci_defs.h')
-rw-r--r--  include/linux/vmw_vmci_defs.h  880
1 file changed, 880 insertions(+), 0 deletions(-)
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
new file mode 100644
index 000000000000..65ac54c61c18
--- /dev/null
+++ b/include/linux/vmw_vmci_defs.h
@@ -0,0 +1,880 @@
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#ifndef _VMW_VMCI_DEF_H_
#define _VMW_VMCI_DEF_H_

#include <linux/atomic.h>

/* Register offsets. */
#define VMCI_STATUS_ADDR      0x00
#define VMCI_CONTROL_ADDR     0x04
#define VMCI_ICR_ADDR         0x08
#define VMCI_IMR_ADDR         0x0c
#define VMCI_DATA_OUT_ADDR    0x10
#define VMCI_DATA_IN_ADDR     0x14
#define VMCI_CAPS_ADDR        0x18
#define VMCI_RESULT_LOW_ADDR  0x1c
#define VMCI_RESULT_HIGH_ADDR 0x20

/* Max number of devices. */
#define VMCI_MAX_DEVICES 1

/* Status register bits. */
#define VMCI_STATUS_INT_ON 0x1

/* Control register bits. */
#define VMCI_CONTROL_RESET       0x1
#define VMCI_CONTROL_INT_ENABLE  0x2
#define VMCI_CONTROL_INT_DISABLE 0x4

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL     0x1
#define VMCI_CAPS_GUESTCALL     0x2
#define VMCI_CAPS_DATAGRAM      0x4
#define VMCI_CAPS_NOTIFICATIONS 0x8

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM     0x1
#define VMCI_ICR_NOTIFICATION 0x2

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM     0x1
#define VMCI_IMR_NOTIFICATION 0x2
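
As a rough sketch of how a guest driver might use these register offsets once the device's I/O region has been mapped (the function name, the iobase parameter, and the required includes <linux/io.h> and <linux/errno.h> are illustrative assumptions, not part of this header):

static int vmci_example_enable_device(void __iomem *iobase)
{
        u32 caps = ioread32(iobase + VMCI_CAPS_ADDR);

        /* Datagrams are the baseline transport; bail out without them. */
        if (!(caps & VMCI_CAPS_DATAGRAM))
                return -ENXIO;

        /* Unmask both interrupt causes, then enable interrupts. */
        iowrite32(VMCI_IMR_DATAGRAM | VMCI_IMR_NOTIFICATION,
                  iobase + VMCI_IMR_ADDR);
        iowrite32(VMCI_CONTROL_INT_ENABLE, iobase + VMCI_CONTROL_ADDR);
        return 0;
}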

/* Interrupt type. */
enum {
        VMCI_INTR_TYPE_INTX = 0,
        VMCI_INTR_TYPE_MSI = 1,
        VMCI_INTR_TYPE_MSIX = 2,
};

/* Maximum MSI/MSI-X interrupt vectors in the device. */
#define VMCI_MAX_INTRS 2

/*
 * Supported interrupt vectors. There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
enum {
        VMCI_INTR_DATAGRAM = 0,
        VMCI_INTR_NOTIFICATION = 1,
};

/*
 * A single VMCI device has an upper limit of 128MB on the amount of
 * memory that can be used for queue pairs.
 */
#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)

/*
 * Queues with pre-mapped data pages must be small, so that we don't pin
 * too much kernel memory (especially on vmkernel). We limit a queuepair to
 * 32 KB, or 16 KB per queue for symmetrical pairs.
 */
#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we statically
 * know how many callers will create datagram handles. If a new caller
 * arrives and we have run out of slots, we can manually increment the
 * maximum size of available resource IDs.
 *
 * VMCI reserved hypervisor datagram resource IDs.
 */
enum {
        VMCI_RESOURCES_QUERY = 0,
        VMCI_GET_CONTEXT_ID = 1,
        VMCI_SET_NOTIFY_BITMAP = 2,
        VMCI_DOORBELL_LINK = 3,
        VMCI_DOORBELL_UNLINK = 4,
        VMCI_DOORBELL_NOTIFY = 5,
        /*
         * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
         * obsoleted by the removal of VM to VM communication.
         */
        VMCI_DATAGRAM_REQUEST_MAP = 6,
        VMCI_DATAGRAM_REMOVE_MAP = 7,
        VMCI_EVENT_SUBSCRIBE = 8,
        VMCI_EVENT_UNSUBSCRIBE = 9,
        VMCI_QUEUEPAIR_ALLOC = 10,
        VMCI_QUEUEPAIR_DETACH = 11,

        /*
         * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
         * WS 7.0/7.1 and ESX 4.1
         */
        VMCI_HGFS_TRANSPORT = 13,
        VMCI_UNITY_PBRPC_REGISTER = 14,
        VMCI_RPC_PRIVILEGED = 15,
        VMCI_RPC_UNPRIVILEGED = 16,
        VMCI_RESOURCE_MAX = 17,
};

/*
 * struct vmci_handle - Ownership information structure
 * @context:  The VMX context ID.
 * @resource: The resource ID (used for locating in resource hash).
 *
 * The vmci_handle structure is used to track resources used within
 * vmw_vmci.
 */
struct vmci_handle {
        u32 context;
        u32 resource;
};

#define vmci_make_handle(_cid, _rid) \
        (struct vmci_handle){ .context = _cid, .resource = _rid }

static inline bool vmci_handle_is_equal(struct vmci_handle h1,
                                        struct vmci_handle h2)
{
        return h1.context == h2.context && h1.resource == h2.resource;
}

#define VMCI_INVALID_ID ~0
static const struct vmci_handle VMCI_INVALID_HANDLE = {
        .context = VMCI_INVALID_ID,
        .resource = VMCI_INVALID_ID
};

static inline bool vmci_handle_is_invalid(struct vmci_handle h)
{
        return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
}
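
A handle is just the (context ID, resource ID) pair, so construction and comparison compose directly; a minimal sketch (the function name and the IDs 3 and 7 are illustrative):

static inline bool vmci_example_handles(void)
{
        struct vmci_handle h = vmci_make_handle(3, 7);  /* cid 3, rid 7 */

        return vmci_handle_is_equal(h, vmci_make_handle(3, 7)) &&
               !vmci_handle_is_invalid(h);
}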

/*
 * The below defines can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID  VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
        .context = VMCI_ANON_SRC_CONTEXT_ID,
        .resource = VMCI_ANON_SRC_RESOURCE_ID
};

/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT ((u32) 16)

/*
 * Hypervisor context id, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID 0

/*
 * Well-known context id, a logical context that contains a set of
 * well-known services. This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID 1

/*
 * Context ID used by host endpoints.
 */
#define VMCI_HOST_CONTEXT_ID 2

#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) && \
                                  (_cid) > VMCI_HOST_CONTEXT_ID)

/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID 0
/*
 * VMCI error codes.
 */
enum {
        VMCI_SUCCESS_QUEUEPAIR_ATTACH = 5,
        VMCI_SUCCESS_QUEUEPAIR_CREATE = 4,
        VMCI_SUCCESS_LAST_DETACH = 3,
        VMCI_SUCCESS_ACCESS_GRANTED = 2,
        VMCI_SUCCESS_ENTRY_DEAD = 1,
        VMCI_SUCCESS = 0,
        VMCI_ERROR_INVALID_RESOURCE = (-1),
        VMCI_ERROR_INVALID_ARGS = (-2),
        VMCI_ERROR_NO_MEM = (-3),
        VMCI_ERROR_DATAGRAM_FAILED = (-4),
        VMCI_ERROR_MORE_DATA = (-5),
        VMCI_ERROR_NO_MORE_DATAGRAMS = (-6),
        VMCI_ERROR_NO_ACCESS = (-7),
        VMCI_ERROR_NO_HANDLE = (-8),
        VMCI_ERROR_DUPLICATE_ENTRY = (-9),
        VMCI_ERROR_DST_UNREACHABLE = (-10),
        VMCI_ERROR_PAYLOAD_TOO_LARGE = (-11),
        VMCI_ERROR_INVALID_PRIV = (-12),
        VMCI_ERROR_GENERIC = (-13),
        VMCI_ERROR_PAGE_ALREADY_SHARED = (-14),
        VMCI_ERROR_CANNOT_SHARE_PAGE = (-15),
        VMCI_ERROR_CANNOT_UNSHARE_PAGE = (-16),
        VMCI_ERROR_NO_PROCESS = (-17),
        VMCI_ERROR_NO_DATAGRAM = (-18),
        VMCI_ERROR_NO_RESOURCES = (-19),
        VMCI_ERROR_UNAVAILABLE = (-20),
        VMCI_ERROR_NOT_FOUND = (-21),
        VMCI_ERROR_ALREADY_EXISTS = (-22),
        VMCI_ERROR_NOT_PAGE_ALIGNED = (-23),
        VMCI_ERROR_INVALID_SIZE = (-24),
        VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
        VMCI_ERROR_TIMEOUT = (-26),
        VMCI_ERROR_DATAGRAM_INCOMPLETE = (-27),
        VMCI_ERROR_INCORRECT_IRQL = (-28),
        VMCI_ERROR_EVENT_UNKNOWN = (-29),
        VMCI_ERROR_OBSOLETE = (-30),
        VMCI_ERROR_QUEUEPAIR_MISMATCH = (-31),
        VMCI_ERROR_QUEUEPAIR_NOTSET = (-32),
        VMCI_ERROR_QUEUEPAIR_NOTOWNER = (-33),
        VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
        VMCI_ERROR_QUEUEPAIR_NOSPACE = (-35),
        VMCI_ERROR_QUEUEPAIR_NODATA = (-36),
        VMCI_ERROR_BUSMEM_INVALIDATION = (-37),
        VMCI_ERROR_MODULE_NOT_LOADED = (-38),
        VMCI_ERROR_DEVICE_NOT_FOUND = (-39),
        VMCI_ERROR_QUEUEPAIR_NOT_READY = (-40),
        VMCI_ERROR_WOULD_BLOCK = (-41),

        /* VMCI clients should return error codes within this range. */
        VMCI_ERROR_CLIENT_MIN = (-500),
        VMCI_ERROR_CLIENT_MAX = (-550),

        /* Internal error codes. */
        VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
};

/* VMCI reserved events. */
enum {
        /* Only applicable to guest endpoints */
        VMCI_EVENT_CTX_ID_UPDATE = 0,

        /* Applicable to guest and host */
        VMCI_EVENT_CTX_REMOVED = 1,

        /* Only applicable to guest endpoints */
        VMCI_EVENT_QP_RESUMED = 2,

        /* Applicable to guest and host */
        VMCI_EVENT_QP_PEER_ATTACH = 3,

        /* Applicable to guest and host */
        VMCI_EVENT_QP_PEER_DETACH = 4,

        /*
         * Applicable to VMX and vmk. On vmk,
         * this event has the Context payload type.
         */
        VMCI_EVENT_MEM_ACCESS_ON = 5,

        /*
         * Applicable to VMX and vmk. Same as
         * above for the payload type.
         */
        VMCI_EVENT_MEM_ACCESS_OFF = 6,
        VMCI_EVENT_MAX = 7,
};

/*
 * Of the above events, a few are reserved for use in the VMX, and
 * other endpoints (guest and host kernel) should not use them. For
 * the rest of the events, we allow both host and guest endpoints to
 * subscribe to them, to maintain the same API for host and guest
 * endpoints.
 */
#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
                                      (_event) == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX && \
                                  !VMCI_EVENT_VALID_VMX(_event))

/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER 0

/*
 * VMCI coarse-grained privileges (per context or host
 * process/endpoint). An entity with the restricted flag is only
 * allowed to interact with the hypervisor and trusted entities.
 */
enum {
        VMCI_NO_PRIVILEGE_FLAGS = 0,
        VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
        VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
        VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
                                    VMCI_PRIVILEGE_FLAG_TRUSTED),
        VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
        VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
        VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
};

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX 1023

/*
 * Driver version.
 *
 * Increment major version when you make an incompatible change.
 * Compatibility goes both ways (old driver with new executable
 * as well as new driver with old executable).
 */

/* Never change VMCI_VERSION_SHIFT_WIDTH */
#define VMCI_VERSION_SHIFT_WIDTH 16
#define VMCI_MAKE_VERSION(_major, _minor) \
        ((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))

#define VMCI_VERSION_MAJOR(v) ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
#define VMCI_VERSION_MINOR(v) ((u16) (v))

/*
 * VMCI_VERSION is always the current version. Subsequently listed
 * versions are ways of detecting previous versions of the connecting
 * application (i.e., VMX).
 *
 * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
 * communication.
 *
 * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
 * support.
 *
 * VMCI_VERSION_HOSTQP: This version introduced host end point support
 * for hosted products.
 *
 * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
 * support for host end-points.
 *
 * VMCI_VERSION_PREVERS2: This fictional version number is intended to
 * represent the version of a VMX which doesn't call into the driver
 * with ioctl VERSION2 and thus doesn't establish its version with the
 * driver.
 */

#define VMCI_VERSION VMCI_VERSION_NOVMVM
#define VMCI_VERSION_NOVMVM VMCI_MAKE_VERSION(11, 0)
#define VMCI_VERSION_NOTIFY VMCI_MAKE_VERSION(10, 0)
#define VMCI_VERSION_HOSTQP VMCI_MAKE_VERSION(9, 0)
#define VMCI_VERSION_PREHOSTQP VMCI_MAKE_VERSION(8, 0)
#define VMCI_VERSION_PREVERS2 VMCI_MAKE_VERSION(1, 0)
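
Concretely, the encoding packs the major version into the high 16 bits and the minor into the low 16, so the round trip is exact; a small sketch (the function name is illustrative):

static inline bool vmci_example_version_roundtrip(void)
{
        u32 v = VMCI_MAKE_VERSION(11, 2);  /* (11 << 16) | 2 == 0x000b0002 */

        return VMCI_VERSION_MAJOR(v) == 11 && VMCI_VERSION_MINOR(v) == 2;
}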

#define VMCI_SOCKETS_MAKE_VERSION(_p) \
        ((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))

/*
 * The VMCI IOCTLs. We use identity code 7, as noted in ioctl-number.h, and
 * we start at sequence 9f. This gives us the same values that our shipping
 * products use, starting at 1951, provided we leave out the direction and
 * structure size. Note that VMMon occupies the block following us, starting
 * at 2001.
 */
#define IOCTL_VMCI_VERSION                  _IO(7, 0x9f)  /* 1951 */
#define IOCTL_VMCI_INIT_CONTEXT             _IO(7, 0xa0)
#define IOCTL_VMCI_QUEUEPAIR_SETVA          _IO(7, 0xa4)
#define IOCTL_VMCI_NOTIFY_RESOURCE          _IO(7, 0xa5)
#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE    _IO(7, 0xa6)
#define IOCTL_VMCI_VERSION2                 _IO(7, 0xa7)
#define IOCTL_VMCI_QUEUEPAIR_ALLOC          _IO(7, 0xa8)
#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE    _IO(7, 0xa9)
#define IOCTL_VMCI_QUEUEPAIR_DETACH         _IO(7, 0xaa)
#define IOCTL_VMCI_DATAGRAM_SEND            _IO(7, 0xab)
#define IOCTL_VMCI_DATAGRAM_RECEIVE         _IO(7, 0xac)
#define IOCTL_VMCI_CTX_ADD_NOTIFICATION     _IO(7, 0xaf)
#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION  _IO(7, 0xb0)
#define IOCTL_VMCI_CTX_GET_CPT_STATE        _IO(7, 0xb1)
#define IOCTL_VMCI_CTX_SET_CPT_STATE        _IO(7, 0xb2)
#define IOCTL_VMCI_GET_CONTEXT_ID           _IO(7, 0xb3)
#define IOCTL_VMCI_SOCKETS_VERSION          _IO(7, 0xb4)
#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE     _IO(7, 0xb8)
#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID    _IO(7, 0xb9)
#define IOCTL_VMCI_SET_NOTIFY               _IO(7, 0xcb)  /* 1995 */
/*IOCTL_VMMON_START                         _IO(7, 0xd1)*/ /* 2001 */
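
As an illustrative userspace sketch of using one of these numbers: the program below assumes the driver exposes a /dev/vmci character device and that IOCTL_VMCI_GET_CONTEXT_ID writes the caller's context ID through the pointer argument (both assumptions here, and the ioctl number itself comes from including this header):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        uint32_t cid = 0;
        int fd = open("/dev/vmci", O_RDWR);  /* assumed device node */

        if (fd < 0)
                return 1;
        if (ioctl(fd, IOCTL_VMCI_GET_CONTEXT_ID, &cid) == 0)
                printf("VMCI context id: %u\n", cid);
        close(fd);
        return 0;
}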

/*
 * struct vmci_queue_header - VMCI Queue Header information.
 *
 * A Queue cannot stand by itself as designed. Each Queue's header
 * contains a pointer into itself (the producer_tail) and into its peer
 * (consumer_head). The reason for the separation is one of
 * accessibility: Each end-point can modify two things: where the next
 * location to enqueue is within its produce_q (producer_tail); and
 * where the next dequeue location is in its consume_q (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to
 * guest; NOTE that in the host both queue headers are mapped r/w).
 * But, each end-point needs read access to both Queue header
 * structures in order to determine how much space is used (or left)
 * in the Queue. This is because for an end-point to know how full
 * its produce_q is, it needs to use the consumer_head that points into
 * the produce_q but -that- consumer_head is in the Queue header for
 * that end-point's consume_q.
 *
 * Thoroughly confused? Sorry.
 *
 * producer_tail: the point to enqueue new entrants. When you approach
 * a line in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued. In other words, whoever is at the head of the line is
 * next.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head in which case consumer_head does not point to a valid
 * byte of data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
 * the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head
 * then the produce_q is empty.
 */
struct vmci_queue_header {
        /* All fields are 64bit and aligned. */
        struct vmci_handle handle;    /* Identifier. */
        atomic64_t producer_tail;     /* Offset in this queue. */
        atomic64_t consumer_head;     /* Offset in peer queue. */
};

/*
 * struct vmci_datagram - Base struct for vmci datagrams.
 * @dst: A vmci_handle that tracks the destination of the datagram.
 * @src: A vmci_handle that tracks the source of the datagram.
 * @payload_size: The size of the payload.
 *
 * vmci_datagram structs are used when sending vmci datagrams. They include
 * the necessary source and destination information to properly route
 * the information along with the size of the package.
 */
struct vmci_datagram {
        struct vmci_handle dst;
        struct vmci_handle src;
        u64 payload_size;
};

/*
 * Datagram flags. VMCI_FLAG_WELLKNOWN_DG_HND creates a well-known
 * handle instead of a per-context handle. VMCI_FLAG_DG_DELAYED_CB
 * defers datagram delivery, so that the datagram callback is invoked
 * in a delayed context (not interrupt context).
 */
#define VMCI_FLAG_DG_NONE          0
#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
#define VMCI_FLAG_ANYCID_DG_HND    0x2
#define VMCI_FLAG_DG_DELAYED_CB    0x4

/*
 * Maximum supported size of a VMCI datagram for routable datagrams.
 * Datagrams going to the hypervisor are allowed to be larger.
 */
#define VMCI_MAX_DG_SIZE (17 * 4096)
#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
                                  sizeof(struct vmci_datagram))
#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + \
                                      sizeof(struct vmci_datagram))
#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
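
A sketch of how these macros fit together when building a datagram in the guest kernel; kzalloc() and memcpy() come from <linux/slab.h> and <linux/string.h>, and the function name is an illustrative assumption:

static struct vmci_datagram *vmci_example_build_dg(struct vmci_handle dst,
                                                   const void *buf, size_t len)
{
        struct vmci_datagram *dg;

        if (len > VMCI_MAX_DG_PAYLOAD_SIZE)
                return NULL;

        dg = kzalloc(VMCI_DG_HEADERSIZE + len, GFP_KERNEL);
        if (!dg)
                return NULL;

        dg->dst = dst;
        dg->src = VMCI_ANON_SRC_HANDLE;  /* anonymous: no reply expected */
        dg->payload_size = len;
        memcpy(VMCI_DG_PAYLOAD(dg), buf, len);

        /* VMCI_DG_SIZE(dg) now gives the full on-the-wire size. */
        return dg;
}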

struct vmci_event_payload_qp {
        struct vmci_handle handle;  /* queue_pair handle. */
        u32 peer_id;                /* Context id of attaching/detaching VM. */
        u32 _pad;
};

/* Flags for VMCI queue_pair API. */
enum {
        /* Fail alloc if QP not created by peer. */
        VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,

        /* Only allow attaches from local context. */
        VMCI_QPFLAG_LOCAL = 1 << 1,

        /* Host won't block when guest is quiesced. */
        VMCI_QPFLAG_NONBLOCK = 1 << 2,

        /* Pin data pages in ESX. Used with NONBLOCK */
        VMCI_QPFLAG_PINNED = 1 << 3,

        /* Update the following flag when adding new flags. */
        VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
                             VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),

        /* Convenience flags */
        VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
        VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
};
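
For instance, a caller-facing entry point might validate a flags argument like this (a sketch; the PINNED/NONBLOCK pairing follows the comment above, and the function name is illustrative):

static inline int vmci_example_check_qp_flags(u32 flags)
{
        if (flags & ~VMCI_QP_ALL_FLAGS)
                return VMCI_ERROR_INVALID_ARGS;  /* unknown flag set */

        /* Pinning pages only makes sense for non-blocking pairs. */
        if ((flags & VMCI_QPFLAG_PINNED) && !(flags & VMCI_QPFLAG_NONBLOCK))
                return VMCI_ERROR_INVALID_ARGS;

        return VMCI_SUCCESS;
}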

/*
 * We allow at least 1024 more event datagrams from the hypervisor past the
 * normally allowed datagrams pending for a given context. We define this
 * limit on event datagrams from the hypervisor to guard against a DoS
 * attack from a malicious VM which could repeatedly attach to and detach
 * from a queue pair, causing events to be queued at the destination VM.
 * However, the rate at which such events can be generated is small, since
 * it requires a VM exit and handling of queue pair attach/detach calls at
 * the hypervisor. Event datagrams may be queued up at the destination VM
 * if it has interrupts disabled or if it is not draining events for some
 * other reason. 1024 datagrams is a grossly conservative estimate of the
 * time for which interrupts may be disabled in the destination VM, but at
 * the same time does not exacerbate the memory pressure problem on the
 * host by much (the size of each event datagram is small).
 */
#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \
        (VMCI_MAX_DATAGRAM_QUEUE_SIZE + \
         1024 * (sizeof(struct vmci_datagram) + \
                 sizeof(struct vmci_event_data_max)))

/*
 * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
 * hypervisor resources. Struct size is 32 bytes. All fields in the struct
 * are aligned to their natural alignment.
 */
struct vmci_resource_query_hdr {
        struct vmci_datagram hdr;
        u32 num_resources;
        u32 _padding;
};

/*
 * Convenience struct for negotiating vectors. Must match the layout of
 * struct vmci_resource_query_hdr minus the struct vmci_datagram header.
 */
struct vmci_resource_query_msg {
        u32 num_resources;
        u32 _padding;
        u32 resources[1];
};

/*
 * The maximum number of resources that can be queried using
 * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
 * bits of a positive return value. Negative values are reserved for
 * errors.
 */
#define VMCI_RESOURCE_QUERY_MAX_NUM 31

/* Maximum size for the VMCI_RESOURCE_QUERY request. */
#define VMCI_RESOURCE_QUERY_MAX_SIZE \
        (sizeof(struct vmci_resource_query_hdr) + \
         sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)
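
To make the query concrete, here is a sketch of building and sending a VMCI_RESOURCES_QUERY datagram from a guest. vmci_datagram_send() is assumed to be the send primitive declared in the driver's companion API header; the function name and the abbreviated error handling are illustrative:

static int vmci_example_resources_query(const u32 *ids, u32 num)
{
        struct vmci_resource_query_hdr *q;
        struct vmci_resource_query_msg *msg;
        size_t size = sizeof(*q) + num * sizeof(u32);
        int result;

        if (num > VMCI_RESOURCE_QUERY_MAX_NUM)
                return VMCI_ERROR_INVALID_ARGS;

        q = kzalloc(size, GFP_KERNEL);
        if (!q)
                return VMCI_ERROR_NO_MEM;

        /* Address the hypervisor's query service; send anonymously. */
        q->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                      VMCI_RESOURCES_QUERY);
        q->hdr.src = VMCI_ANON_SRC_HANDLE;
        q->hdr.payload_size = size - VMCI_DG_HEADERSIZE;

        msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(&q->hdr);
        msg->num_resources = num;
        memcpy(msg->resources, ids, num * sizeof(u32));

        /* A positive result encodes availability in its lower 31 bits. */
        result = vmci_datagram_send(&q->hdr);
        kfree(q);
        return result;
}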

/*
 * Struct used for setting the notification bitmap. All fields in
 * struct are aligned to their natural alignment.
 */
struct vmci_notify_bm_set_msg {
        struct vmci_datagram hdr;
        u32 bitmap_ppn;
        u32 _pad;
};

/*
 * Struct used for linking a doorbell handle with an index in the
 * notify bitmap. All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_link_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
        u64 notify_idx;
};

/*
 * Struct used for unlinking a doorbell handle from an index in the
 * notify bitmap. All fields in struct are aligned to their natural
 * alignment.
 */
struct vmci_doorbell_unlink_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};

/*
 * Struct used for generating a notification on a doorbell handle. All
 * fields in struct are aligned to their natural alignment.
 */
struct vmci_doorbell_notify_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};

/*
 * This struct is used to contain data for events. Size of this struct is a
 * multiple of 8 bytes, and all fields are aligned to their natural alignment.
 */
struct vmci_event_data {
        u32 event;  /* 4 bytes. */
        u32 _pad;
        /* Event payload is put here. */
};

/*
 * Define the different VMCI_EVENT payload data types here. All structs must
 * be a multiple of 8 bytes, and fields must be aligned to their natural
 * alignment.
 */
struct vmci_event_payld_ctx {
        u32 context_id;  /* 4 bytes. */
        u32 _pad;
};

struct vmci_event_payld_qp {
        struct vmci_handle handle;  /* queue_pair handle. */
        u32 peer_id;                /* Context id of attaching/detaching VM. */
        u32 _pad;
};

/*
 * We define the following struct to get the size of the maximum event
 * data the hypervisor may send to the guest. If adding a new event
 * payload type above, add it to the following struct too (inside the
 * union).
 */
struct vmci_event_data_max {
        struct vmci_event_data event_data;
        union {
                struct vmci_event_payld_ctx context_payload;
                struct vmci_event_payld_qp qp_payload;
        } ev_data_payload;
};

/*
 * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
 * VMCI_EVENT_HANDLER messages. Struct size is 32 bytes. All fields
 * in struct are aligned to their natural alignment.
 */
struct vmci_event_msg {
        struct vmci_datagram hdr;

        /* Has event type and payload. */
        struct vmci_event_data event_data;

        /* Payload gets put here. */
};

/* Event with context payload. */
struct vmci_event_ctx {
        struct vmci_event_msg msg;
        struct vmci_event_payld_ctx payload;
};

/* Event with QP payload. */
struct vmci_event_qp {
        struct vmci_event_msg msg;
        struct vmci_event_payld_qp payload;
};

/*
 * Structs used for queue_pair alloc and detach messages. We align fields of
 * these structs to 64bit boundaries.
 */
struct vmci_qp_alloc_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
        u32 peer;
        u32 flags;
        u64 produce_size;
        u64 consume_size;
        u64 num_ppns;

        /* List of PPNs placed here. */
};

struct vmci_qp_detach_msg {
        struct vmci_datagram hdr;
        struct vmci_handle handle;
};

/* VMCI Doorbell API. */
#define VMCI_FLAG_DELAYED_CB 0x01

typedef void (*vmci_callback) (void *client_data);

/*
 * struct vmci_qp - A vmw_vmci queue pair handle.
 *
 * This structure is used as a handle to a queue pair created by
 * VMCI. It is intentionally left opaque to clients.
 */
struct vmci_qp;

/* Callback needed for correctly waiting on events. */
typedef int (*vmci_datagram_recv_cb) (void *client_data,
                                      struct vmci_datagram *msg);

/* VMCI Event API. */
typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
                               void *client_data);

/*
 * The following inline functions provide access to the payload that
 * follows a struct vmci_event_data.
 */
static inline const void *
vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
{
        return (const char *)ev_data + sizeof(*ev_data);
}

static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
{
        return (void *)vmci_event_data_const_payload(ev_data);
}
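
A sketch of an event callback matching the vmci_event_cb signature that reads a queue pair payload; pr_info() requires <linux/printk.h>, and the function name and message text are illustrative:

static void vmci_example_event_cb(u32 sub_id,
                                  const struct vmci_event_data *ed,
                                  void *client_data)
{
        if (ed->event == VMCI_EVENT_QP_PEER_ATTACH) {
                const struct vmci_event_payld_qp *p =
                        vmci_event_data_const_payload(ed);

                pr_info("peer %u attached to qp %u:%u\n", p->peer_id,
                        p->handle.context, p->handle.resource);
        }
}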

/*
 * Helper to add a given offset to a head or tail pointer. Wraps the
 * value of the pointer around the max size of the queue.
 */
static inline void vmci_qp_add_pointer(atomic64_t *var,
                                       size_t add,
                                       u64 size)
{
        u64 new_val = atomic64_read(var);

        if (new_val >= size - add)
                new_val -= size;

        new_val += add;

        atomic64_set(var, new_val);
}
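
For example, with a 100 byte queue and the tail at offset 98, adding 5 wraps the tail to (98 + 5) mod 100 == 3; the intermediate u64 underflow from new_val -= size cancels out because the arithmetic is modulo 2^64. As a sketch (the function name is illustrative):

static inline u64 vmci_example_wrap(void)
{
        atomic64_t tail = ATOMIC64_INIT(98);

        vmci_qp_add_pointer(&tail, 5, 100);
        return atomic64_read(&tail);  /* 3 */
}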

/*
 * Helper routine to get the Producer Tail from the supplied queue.
 */
static inline u64
vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
{
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
        return atomic64_read(&qh->producer_tail);
}

/*
 * Helper routine to get the Consumer Head from the supplied queue.
 */
static inline u64
vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
{
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
        return atomic64_read(&qh->consumer_head);
}

/*
 * Helper routine to increment the Producer Tail. Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the tail itself.
 */
static inline void
vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
                                size_t add,
                                u64 queue_size)
{
        vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

/*
 * Helper routine to increment the Consumer Head. Fundamentally,
 * vmci_qp_add_pointer() is used to manipulate the head itself.
 */
static inline void
vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
                                size_t add,
                                u64 queue_size)
{
        vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
}

/*
 * Helper routine for getting the head and the tail pointer for a queue.
 * Both queue headers are needed to get both pointers for one queue.
 */
static inline void
vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
                           const struct vmci_queue_header *consume_q_header,
                           u64 *producer_tail,
                           u64 *consumer_head)
{
        if (producer_tail)
                *producer_tail = vmci_q_header_producer_tail(produce_q_header);

        if (consumer_head)
                *consumer_head = vmci_q_header_consumer_head(consume_q_header);
}

static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
                                      const struct vmci_handle handle)
{
        q_header->handle = handle;
        atomic64_set(&q_header->producer_tail, 0);
        atomic64_set(&q_header->consumer_head, 0);
}

/*
 * Finds available free space in a produce queue to enqueue more
 * data or reports an error if queue pair corruption is detected.
 */
static inline s64
vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
                         const struct vmci_queue_header *consume_q_header,
                         const u64 produce_q_size)
{
        u64 tail;
        u64 head;
        u64 free_space;

        tail = vmci_q_header_producer_tail(produce_q_header);
        head = vmci_q_header_consumer_head(consume_q_header);

        if (tail >= produce_q_size || head >= produce_q_size)
                return VMCI_ERROR_INVALID_SIZE;

        /*
         * Deduct 1 to avoid tail becoming equal to head which causes
         * ambiguity. If head and tail are equal it means that the
         * queue is empty.
         */
        if (tail >= head)
                free_space = produce_q_size - (tail - head) - 1;
        else
                free_space = head - tail - 1;

        return free_space;
}

/*
 * vmci_q_header_free_space() does all the heavy lifting of
 * determining the number of free bytes in a Queue. This routine
 * then subtracts that size from the full size of the Queue so
 * the caller knows how many bytes are ready to be dequeued.
 * Results:
 * On success, available data size in bytes (up to MAX_INT64).
 * On failure, appropriate error code.
 */
static inline s64
vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
                        const struct vmci_queue_header *produce_q_header,
                        const u64 consume_q_size)
{
        s64 free_space;

        free_space = vmci_q_header_free_space(consume_q_header,
                                              produce_q_header,
                                              consume_q_size);
        if (free_space < VMCI_SUCCESS)
                return free_space;

        return consume_q_size - free_space - 1;
}
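
As a worked example of the two routines above: with a 100 byte queue, producer_tail == 10 and consumer_head == 4, vmci_q_header_free_space() returns 100 - (10 - 4) - 1 == 93, and vmci_q_header_buf_ready() on the consuming side reports 100 - 93 - 1 == 6 bytes ready to dequeue, which is exactly tail - head.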

#endif /* _VMW_VMCI_DEF_H_ */