aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/virt
diff options
context:
space:
mode:
authorHans de Goede <hdegoede@redhat.com>2017-11-30 11:01:26 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-12-18 10:12:21 -0500
commit579db9d45cb4e8e7cedff9e6079331a1e2ea9f5d (patch)
tree33d8b27da0e76cfbc9ca965ad7ead2f5fe595f0e /drivers/virt
parentf6ddd094f5793447d594aa9f42032a7aba12b4d2 (diff)
virt: Add vboxguest VMMDEV communication code
This commit adds a header describing the hardware interface for the Virtual Box Guest PCI device used in Virtual Box virtual machines and utility functions for talking to the Virtual Box hypervisor over this interface. These utility functions will be used both by the vboxguest driver for the PCI device which offers the /dev/vboxguest ioctl API and by the vboxfs driver which offers folder sharing support. Signed-off-by: Hans de Goede <hdegoede@redhat.com> Reviewed-by: Larry Finger <Larry.Finger@lwfinger.net> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/virt')
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c801
-rw-r--r--drivers/virt/vboxguest/vmmdev.h449
2 files changed, 1250 insertions, 0 deletions
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
new file mode 100644
index 000000000000..8daea691fbb5
--- /dev/null
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -0,0 +1,801 @@
1/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
2/*
3 * vboxguest vmm-req and hgcm-call code, VBoxGuestR0LibHGCMInternal.cpp,
4 * VBoxGuestR0LibGenericRequest.cpp and RTErrConvertToErrno.cpp in vbox svn.
5 *
6 * Copyright (C) 2006-2016 Oracle Corporation
7 */
8
9#include <linux/errno.h>
10#include <linux/kernel.h>
11#include <linux/mm.h>
12#include <linux/module.h>
13#include <linux/sizes.h>
14#include <linux/slab.h>
15#include <linux/uaccess.h>
16#include <linux/vmalloc.h>
17#include <linux/vbox_err.h>
18#include <linux/vbox_utils.h>
19#include "vboxguest_core.h"
20
21/* Get the pointer to the first parameter of a HGCM call request. */
22#define VMMDEV_HGCM_CALL_PARMS(a) \
23 ((struct vmmdev_hgcm_function_parameter *)( \
24 (u8 *)(a) + sizeof(struct vmmdev_hgcm_call)))
25
26/* The max parameter buffer size for a user request. */
27#define VBG_MAX_HGCM_USER_PARM (24 * SZ_1M)
28/* The max parameter buffer size for a kernel request. */
29#define VBG_MAX_HGCM_KERNEL_PARM (16 * SZ_1M)
30
31#define VBG_DEBUG_PORT 0x504
32
/* This protects vbg_log_buf and serializes VBG_DEBUG_PORT accesses */
static DEFINE_SPINLOCK(vbg_log_lock);
/* Scratch buffer for formatting log messages, guarded by vbg_log_lock. */
static char vbg_log_buf[128];

/*
 * Generates a vbg logging function. The generated function formats the
 * message into vbg_log_buf, writes it byte-by-byte to the VirtualBox debug
 * I/O port and also logs it through the given kernel pr_* function.
 * Messages longer than vbg_log_buf are truncated by vscnprintf.
 */
#define VBG_LOG(name, pr_func) \
void name(const char *fmt, ...) \
{ \
	unsigned long flags; \
	va_list args; \
	int i, count; \
	\
	va_start(args, fmt); \
	spin_lock_irqsave(&vbg_log_lock, flags); \
	\
	count = vscnprintf(vbg_log_buf, sizeof(vbg_log_buf), fmt, args);\
	for (i = 0; i < count; i++) \
		outb(vbg_log_buf[i], VBG_DEBUG_PORT); \
	\
	pr_func("%s", vbg_log_buf); \
	\
	spin_unlock_irqrestore(&vbg_log_lock, flags); \
	va_end(args); \
} \
EXPORT_SYMBOL(name)

VBG_LOG(vbg_info, pr_info);
VBG_LOG(vbg_warn, pr_warn);
VBG_LOG(vbg_err, pr_err);
/*
 * NOTE(review): vbg_debug is only generated for static DEBUG builds;
 * presumably a no-op or dynamic-debug variant is provided elsewhere
 * (vbox_utils.h?) — confirm.
 */
#if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
VBG_LOG(vbg_debug, pr_debug);
#endif
64
65void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
66{
67 struct vmmdev_request_header *req;
68
69 req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
70 if (!req)
71 return NULL;
72
73 memset(req, 0xaa, len);
74
75 req->size = len;
76 req->version = VMMDEV_REQUEST_HEADER_VERSION;
77 req->request_type = req_type;
78 req->rc = VERR_GENERAL_FAILURE;
79 req->reserved1 = 0;
80 req->reserved2 = 0;
81
82 return req;
83}
84
/**
 * Perform a VMMDev request by handing its physical address to the host
 * through the device's request I/O port.
 * Note this function returns a VBox status code, not a negative errno!!
 * @gdev: The Guest extension device.
 * @req:  Request allocated with vbg_req_alloc() (physically contiguous).
 */
int vbg_req_perform(struct vbg_dev *gdev, void *req)
{
	unsigned long phys_req = virt_to_phys(req);

	outl(phys_req, gdev->io_port + VMMDEV_PORT_OFF_REQUEST);
	/*
	 * The host changes the request as a result of the outl, make sure
	 * the outl and any reads of the req happen in the correct order.
	 */
	mb();

	return ((struct vmmdev_request_header *)req)->rc;
}
99
100static bool hgcm_req_done(struct vbg_dev *gdev,
101 struct vmmdev_hgcmreq_header *header)
102{
103 unsigned long flags;
104 bool done;
105
106 spin_lock_irqsave(&gdev->event_spinlock, flags);
107 done = header->flags & VMMDEV_HGCM_REQ_DONE;
108 spin_unlock_irqrestore(&gdev->event_spinlock, flags);
109
110 return done;
111}
112
113int vbg_hgcm_connect(struct vbg_dev *gdev,
114 struct vmmdev_hgcm_service_location *loc,
115 u32 *client_id, int *vbox_status)
116{
117 struct vmmdev_hgcm_connect *hgcm_connect = NULL;
118 int rc;
119
120 hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
121 VMMDEVREQ_HGCM_CONNECT);
122 if (!hgcm_connect)
123 return -ENOMEM;
124
125 hgcm_connect->header.flags = 0;
126 memcpy(&hgcm_connect->loc, loc, sizeof(*loc));
127 hgcm_connect->client_id = 0;
128
129 rc = vbg_req_perform(gdev, hgcm_connect);
130
131 if (rc == VINF_HGCM_ASYNC_EXECUTE)
132 wait_event(gdev->hgcm_wq,
133 hgcm_req_done(gdev, &hgcm_connect->header));
134
135 if (rc >= 0) {
136 *client_id = hgcm_connect->client_id;
137 rc = hgcm_connect->header.result;
138 }
139
140 kfree(hgcm_connect);
141
142 *vbox_status = rc;
143 return 0;
144}
145EXPORT_SYMBOL(vbg_hgcm_connect);
146
147int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
148{
149 struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
150 int rc;
151
152 hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
153 VMMDEVREQ_HGCM_DISCONNECT);
154 if (!hgcm_disconnect)
155 return -ENOMEM;
156
157 hgcm_disconnect->header.flags = 0;
158 hgcm_disconnect->client_id = client_id;
159
160 rc = vbg_req_perform(gdev, hgcm_disconnect);
161
162 if (rc == VINF_HGCM_ASYNC_EXECUTE)
163 wait_event(gdev->hgcm_wq,
164 hgcm_req_done(gdev, &hgcm_disconnect->header));
165
166 if (rc >= 0)
167 rc = hgcm_disconnect->header.result;
168
169 kfree(hgcm_disconnect);
170
171 *vbox_status = rc;
172 return 0;
173}
174EXPORT_SYMBOL(vbg_hgcm_disconnect);
175
176static u32 hgcm_call_buf_size_in_pages(void *buf, u32 len)
177{
178 u32 size = PAGE_ALIGN(len + ((unsigned long)buf & ~PAGE_MASK));
179
180 return size >> PAGE_SHIFT;
181}
182
183static void hgcm_call_add_pagelist_size(void *buf, u32 len, size_t *extra)
184{
185 u32 page_count;
186
187 page_count = hgcm_call_buf_size_in_pages(buf, len);
188 *extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
189}
190
191static int hgcm_call_preprocess_linaddr(
192 const struct vmmdev_hgcm_function_parameter *src_parm,
193 void **bounce_buf_ret, size_t *extra)
194{
195 void *buf, *bounce_buf;
196 bool copy_in;
197 u32 len;
198 int ret;
199
200 buf = (void *)src_parm->u.pointer.u.linear_addr;
201 len = src_parm->u.pointer.size;
202 copy_in = src_parm->type != VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT;
203
204 if (len > VBG_MAX_HGCM_USER_PARM)
205 return -E2BIG;
206
207 bounce_buf = kvmalloc(len, GFP_KERNEL);
208 if (!bounce_buf)
209 return -ENOMEM;
210
211 if (copy_in) {
212 ret = copy_from_user(bounce_buf, (void __user *)buf, len);
213 if (ret)
214 return -EFAULT;
215 } else {
216 memset(bounce_buf, 0, len);
217 }
218
219 *bounce_buf_ret = bounce_buf;
220 hgcm_call_add_pagelist_size(bounce_buf, len, extra);
221 return 0;
222}
223
/**
 * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and
 * figure out how much extra storage we need for page lists.
 * Return: 0 or negative errno value.
 * @src_parm:        Pointer to source function call parameters
 * @parm_count:      Number of function call parameters.
 * @bounce_bufs_ret: Where to return the allocated bouncebuffer array
 * @extra:           Where to return the extra request space needed for
 *                   physical page lists.
 */
static int hgcm_call_preprocess(
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void ***bounce_bufs_ret, size_t *extra)
{
	void *buf, **bounce_bufs = NULL;
	u32 i, len;
	int ret;

	for (i = 0; i < parm_count; i++, src_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			/* Plain value parameters need no extra storage. */
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			/*
			 * Userspace buffers get bounce buffers; allocate the
			 * tracking array lazily on first use. It is returned
			 * to the caller immediately so the buffers can be
			 * freed there even when we error out below.
			 */
			if (!bounce_bufs) {
				bounce_bufs = kcalloc(parm_count,
						      sizeof(void *),
						      GFP_KERNEL);
				if (!bounce_bufs)
					return -ENOMEM;

				*bounce_bufs_ret = bounce_bufs;
			}

			ret = hgcm_call_preprocess_linaddr(src_parm,
							   &bounce_bufs[i],
							   extra);
			if (ret)
				return ret;

			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			/* Kernel buffers are used in place, just validate. */
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			len = src_parm->u.pointer.size;
			if (WARN_ON(len > VBG_MAX_HGCM_KERNEL_PARM))
				return -E2BIG;

			hgcm_call_add_pagelist_size(buf, len, extra);
			break;

		default:
			return -EINVAL;
		}
	}

	return 0;
}
287
288/**
289 * Translates linear address types to page list direction flags.
290 *
291 * Return: page list flags.
292 * @type: The type.
293 */
294static u32 hgcm_call_linear_addr_type_to_pagelist_flags(
295 enum vmmdev_hgcm_function_parameter_type type)
296{
297 switch (type) {
298 default:
299 WARN_ON(1);
300 /* Fall through */
301 case VMMDEV_HGCM_PARM_TYPE_LINADDR:
302 case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
303 return VMMDEV_HGCM_F_PARM_DIRECTION_BOTH;
304
305 case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
306 case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
307 return VMMDEV_HGCM_F_PARM_DIRECTION_TO_HOST;
308
309 case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
310 case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
311 return VMMDEV_HGCM_F_PARM_DIRECTION_FROM_HOST;
312 }
313}
314
/**
 * Initialize a linear-address parameter in the host request, converting it
 * into a physical page-list parameter stored in the extra space that
 * follows the parameter array.
 * @call:      The call request the parameter belongs to.
 * @dst_parm:  The destination parameter to initialize.
 * @buf:       The (bounce or kernel) buffer backing the parameter.
 * @len:       Buffer length in bytes.
 * @type:      The original LINADDR* type, determines the direction flags.
 * @off_extra: In/out byte offset into @call of the next free extra space.
 */
static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm, void *buf, u32 len,
	enum vmmdev_hgcm_function_parameter_type type, u32 *off_extra)
{
	struct vmmdev_hgcm_pagelist *dst_pg_lst;
	struct page *page;
	bool is_vmalloc;
	u32 i, page_count;

	dst_parm->type = type;

	/* Empty buffers are passed through as a zero linear address. */
	if (len == 0) {
		dst_parm->u.pointer.size = 0;
		dst_parm->u.pointer.u.linear_addr = 0;
		return;
	}

	/* The page list lives at *off_extra bytes into the request. */
	dst_pg_lst = (void *)call + *off_extra;
	page_count = hgcm_call_buf_size_in_pages(buf, len);
	is_vmalloc = is_vmalloc_addr(buf);

	dst_parm->type = VMMDEV_HGCM_PARM_TYPE_PAGELIST;
	dst_parm->u.page_list.size = len;
	dst_parm->u.page_list.offset = *off_extra;
	dst_pg_lst->flags = hgcm_call_linear_addr_type_to_pagelist_flags(type);
	dst_pg_lst->offset_first_page = (unsigned long)buf & ~PAGE_MASK;
	dst_pg_lst->page_count = page_count;

	/* Record the physical address of each page backing the buffer. */
	for (i = 0; i < page_count; i++) {
		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		dst_pg_lst->pages[i] = page_to_phys(page);
		buf += PAGE_SIZE;
	}

	*off_extra += offsetof(struct vmmdev_hgcm_pagelist, pages[page_count]);
}
355
/**
 * Initializes the call request that we're sending to the host.
 * @call:        The call to initialize.
 * @client_id:   The client ID of the caller.
 * @function:    The function number of the function to call.
 * @src_parm:    Pointer to source function call parameters.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bouncebuffer array (indexed by parameter number;
 *               only valid for userspace LINADDR parameters).
 */
static void hgcm_call_init_call(
	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
	const struct vmmdev_hgcm_function_parameter *src_parm,
	u32 parm_count, void **bounce_bufs)
{
	struct vmmdev_hgcm_function_parameter *dst_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	/* Page lists are stored after the parameter array. */
	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
	void *buf;

	call->header.flags = 0;
	call->header.result = VINF_SUCCESS;
	call->client_id = client_id;
	call->function = function;
	call->parm_count = parm_count;

	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		switch (src_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		/* Userspace buffers: the host gets the bounce buffer. */
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		/* Kernel buffers are used in place. */
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			buf = (void *)src_parm->u.pointer.u.linear_addr;
			hgcm_call_init_linaddr(call, dst_parm, buf,
					       src_parm->u.pointer.size,
					       src_parm->type, &off_extra);
			break;

		default:
			/* hgcm_call_preprocess() already rejected these. */
			WARN_ON(1);
			dst_parm->type = VMMDEV_HGCM_PARM_TYPE_INVALID;
		}
	}
}
411
/**
 * Tries to cancel a pending HGCM call.
 *
 * Return: VBox status code
 * @gdev: The Guest extension device.
 * @call: The call to cancel.
 */
static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
{
	int rc;

	/*
	 * We use a pre-allocated request for cancellations, which is
	 * protected by cancel_req_mutex. This means that all cancellations
	 * get serialized, this should be fine since they should be rare.
	 */
	mutex_lock(&gdev->cancel_req_mutex);
	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
	rc = vbg_req_perform(gdev, gdev->cancel_req);
	mutex_unlock(&gdev->cancel_req_mutex);

	/* Fall back to the old-style cancel on hosts without the new one. */
	if (rc == VERR_NOT_IMPLEMENTED) {
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;

		rc = vbg_req_perform(gdev, call);
		if (rc == VERR_INVALID_PARAMETER)
			rc = VERR_NOT_FOUND;
	}

	if (rc >= 0)
		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;

	return rc;
}
445
/**
 * Performs the call and completion wait.
 * Return: 0 or negative errno value.
 * @gdev:       The VBoxGuest device extension.
 * @call:       The call to execute.
 * @timeout_ms: Timeout in ms, U32_MAX means wait indefinitely.
 * @leak_it:    Where to return the leak it / free it, indicator.
 *              Cancellation fun.
 */
static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
			    u32 timeout_ms, bool *leak_it)
{
	int rc, cancel_rc, ret;
	long timeout;

	*leak_it = false;

	rc = vbg_req_perform(gdev, call);

	/*
	 * If the call failed, then pretend success. Upper layers will
	 * interpret the result code in the packet.
	 */
	if (rc < 0) {
		call->header.result = rc;
		return 0;
	}

	/* Anything other than async execution means the call is done. */
	if (rc != VINF_HGCM_ASYNC_EXECUTE)
		return 0;

	/* Host decided to process the request asynchronously, wait for it */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	timeout = wait_event_interruptible_timeout(
					gdev->hgcm_wq,
					hgcm_req_done(gdev, &call->header),
					timeout);

	/* timeout > 0 means hgcm_req_done has returned true, so success */
	if (timeout > 0)
		return 0;

	if (timeout == 0)
		ret = -ETIMEDOUT;
	else
		ret = -EINTR;	/* Negative timeout: woken by a signal. */

	/* Cancel the request */
	cancel_rc = hgcm_cancel_call(gdev, call);
	if (cancel_rc >= 0)
		return ret;

	/*
	 * Failed to cancel, this should mean that the cancel has lost the
	 * race with normal completion, wait while the host completes it.
	 */
	if (cancel_rc == VERR_NOT_FOUND || cancel_rc == VERR_SEM_DESTROYED)
		timeout = msecs_to_jiffies(500);
	else
		timeout = msecs_to_jiffies(2000);

	timeout = wait_event_timeout(gdev->hgcm_wq,
				     hgcm_req_done(gdev, &call->header),
				     timeout);

	if (WARN_ON(timeout == 0)) {
		/*
		 * We really should never get here. The host still owns the
		 * request memory, so tell the caller not to free it.
		 */
		vbg_err("%s: Call timedout and cancellation failed, leaking the request\n",
			__func__);
		*leak_it = true;
		return ret;
	}

	/* The call has completed normally after all */
	return 0;
}
526
/**
 * Copies the result of the call back to the caller info structure and user
 * buffers.
 * Return: 0 or negative errno value.
 * @call:        HGCM call request.
 * @dst_parm:    Pointer to function call parameters destination.
 * @parm_count:  Number of function call parameters.
 * @bounce_bufs: The bouncebuffer array.
 */
static int hgcm_call_copy_back_result(
	const struct vmmdev_hgcm_call *call,
	struct vmmdev_hgcm_function_parameter *dst_parm,
	u32 parm_count, void **bounce_bufs)
{
	const struct vmmdev_hgcm_function_parameter *src_parm =
		VMMDEV_HGCM_CALL_PARMS(call);
	void __user *p;
	int ret;
	u32 i;

	/* Copy back parameters. */
	for (i = 0; i < parm_count; i++, src_parm++, dst_parm++) {
		/* Note: we switch on the caller's (destination) type. */
		switch (dst_parm->type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			*dst_parm = *src_parm;
			break;

		case VMMDEV_HGCM_PARM_TYPE_PAGELIST:
			dst_parm->u.page_list.size = src_parm->u.page_list.size;
			break;

		/*
		 * In-only and kernel in-place buffers: no data to copy back,
		 * only the size is updated.
		 */
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_IN:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_KERNEL_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;
			break;

		/* Userspace out-capable buffers: copy the bounce buffer back */
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
			dst_parm->u.pointer.size = src_parm->u.pointer.size;

			p = (void __user *)dst_parm->u.pointer.u.linear_addr;
			ret = copy_to_user(p, bounce_bufs[i],
					   min(src_parm->u.pointer.size,
					       dst_parm->u.pointer.size));
			if (ret)
				return -EFAULT;
			break;

		default:
			WARN_ON(1);
			return -EINVAL;
		}
	}

	return 0;
}
586
/**
 * Performs an HGCM call to the host.
 * Return: 0 or negative errno value; the VBox status of the call itself is
 *         returned through @vbox_status.
 * @gdev:        The Guest extension device.
 * @client_id:   The client ID of the caller.
 * @function:    The function number of the function to call.
 * @timeout_ms:  Timeout in ms, U32_MAX means wait indefinitely.
 * @parms:       The call parameters, updated in place with the results.
 * @parm_count:  Number of call parameters.
 * @vbox_status: Where to store the VBox status code of the call.
 */
int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
		  u32 parm_count, int *vbox_status)
{
	struct vmmdev_hgcm_call *call;
	void **bounce_bufs = NULL;
	bool leak_it;
	size_t size;
	int i, ret;

	size = sizeof(struct vmmdev_hgcm_call) +
		parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	/*
	 * Validate and buffer the parameters for the call. This also increases
	 * call_size with the amount of extra space needed for page lists.
	 */
	ret = hgcm_call_preprocess(parms, parm_count, &bounce_bufs, &size);
	if (ret) {
		/* Even on error bounce bufs may still have been allocated */
		goto free_bounce_bufs;
	}

	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
	if (!call) {
		ret = -ENOMEM;
		goto free_bounce_bufs;
	}

	hgcm_call_init_call(call, client_id, function, parms, parm_count,
			    bounce_bufs);

	ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
	if (ret == 0) {
		*vbox_status = call->header.result;
		ret = hgcm_call_copy_back_result(call, parms, parm_count,
						 bounce_bufs);
	}

	/* If cancellation failed, the host still owns the request memory. */
	if (!leak_it)
		kfree(call);

free_bounce_bufs:
	if (bounce_bufs) {
		for (i = 0; i < parm_count; i++)
			kvfree(bounce_bufs[i]);
		kfree(bounce_bufs);
	}

	return ret;
}
EXPORT_SYMBOL(vbg_hgcm_call);
638
#ifdef CONFIG_COMPAT
/**
 * Compat wrapper around vbg_hgcm_call() for 32-bit callers: converts the
 * 32-bit parameter array to 64-bit parameters, performs the call and
 * converts the results back.
 * Return: 0 or negative errno value.
 * @gdev:        The Guest extension device.
 * @client_id:   The client ID of the caller.
 * @function:    The function number of the function to call.
 * @timeout_ms:  Timeout in ms.
 * @parm32:      Array of 32-bit call parameters, updated with the results.
 * @parm_count:  Number of call parameters.
 * @vbox_status: Where to store the VBox status code of the call.
 */
int vbg_hgcm_call32(
	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
	int *vbox_status)
{
	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
	u32 i, size;
	int ret = 0;

	/* KISS allocate a temporary request and convert the parameters. */
	size = parm_count * sizeof(struct vmmdev_hgcm_function_parameter);
	parm64 = kzalloc(size, GFP_KERNEL);
	if (!parm64)
		return -ENOMEM;

	for (i = 0; i < parm_count; i++) {
		switch (parm32[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_32BIT;
			parm64[i].u.value32 = parm32[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm64[i].type = VMMDEV_HGCM_PARM_TYPE_64BIT;
			parm64[i].u.value64 = parm32[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm64[i].type = parm32[i].type;
			parm64[i].u.pointer.size = parm32[i].u.pointer.size;
			parm64[i].u.pointer.u.linear_addr =
			    parm32[i].u.pointer.u.linear_addr;
			break;

		default:
			ret = -EINVAL;
		}
		if (ret < 0)
			goto out_free;
	}

	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
			    parm64, parm_count, vbox_status);
	if (ret < 0)
		goto out_free;

	/*
	 * Copy back. Index only — the previous code also advanced the
	 * parm32/parm64 pointers each iteration while indexing with [i],
	 * reading element 2 * i (out of bounds for i >= 1) and then
	 * freeing the advanced parm64 pointer below.
	 */
	for (i = 0; i < parm_count; i++) {
		switch (parm64[i].type) {
		case VMMDEV_HGCM_PARM_TYPE_32BIT:
			parm32[i].u.value32 = parm64[i].u.value32;
			break;

		case VMMDEV_HGCM_PARM_TYPE_64BIT:
			parm32[i].u.value64 = parm64[i].u.value64;
			break;

		case VMMDEV_HGCM_PARM_TYPE_LINADDR_OUT:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR:
		case VMMDEV_HGCM_PARM_TYPE_LINADDR_IN:
			parm32[i].u.pointer.size = parm64[i].u.pointer.size;
			break;

		default:
			WARN_ON(1);
			ret = -EINVAL;
		}
	}

out_free:
	kfree(parm64);
	return ret;
}
#endif
716
/*
 * Sparse table translating negated VBox status codes (-VERR_*) to negative
 * errno values. Entries left at 0 indicate codes without a mapping; see
 * vbg_status_code_to_errno().
 */
static const int vbg_status_code_to_errno_table[] = {
	[-VERR_ACCESS_DENIED]                            = -EPERM,
	[-VERR_FILE_NOT_FOUND]                           = -ENOENT,
	[-VERR_PROCESS_NOT_FOUND]                        = -ESRCH,
	[-VERR_INTERRUPTED]                              = -EINTR,
	[-VERR_DEV_IO_ERROR]                             = -EIO,
	[-VERR_TOO_MUCH_DATA]                            = -E2BIG,
	[-VERR_BAD_EXE_FORMAT]                           = -ENOEXEC,
	[-VERR_INVALID_HANDLE]                           = -EBADF,
	[-VERR_TRY_AGAIN]                                = -EAGAIN,
	[-VERR_NO_MEMORY]                                = -ENOMEM,
	[-VERR_INVALID_POINTER]                          = -EFAULT,
	[-VERR_RESOURCE_BUSY]                            = -EBUSY,
	[-VERR_ALREADY_EXISTS]                           = -EEXIST,
	[-VERR_NOT_SAME_DEVICE]                          = -EXDEV,
	[-VERR_NOT_A_DIRECTORY]                          = -ENOTDIR,
	[-VERR_PATH_NOT_FOUND]                           = -ENOTDIR,
	[-VERR_IS_A_DIRECTORY]                           = -EISDIR,
	[-VERR_INVALID_PARAMETER]                        = -EINVAL,
	[-VERR_TOO_MANY_OPEN_FILES]                      = -ENFILE,
	[-VERR_INVALID_FUNCTION]                         = -ENOTTY,
	[-VERR_SHARING_VIOLATION]                        = -ETXTBSY,
	[-VERR_FILE_TOO_BIG]                             = -EFBIG,
	[-VERR_DISK_FULL]                                = -ENOSPC,
	[-VERR_SEEK_ON_DEVICE]                           = -ESPIPE,
	[-VERR_WRITE_PROTECT]                            = -EROFS,
	[-VERR_BROKEN_PIPE]                              = -EPIPE,
	[-VERR_DEADLOCK]                                 = -EDEADLK,
	[-VERR_FILENAME_TOO_LONG]                        = -ENAMETOOLONG,
	[-VERR_FILE_LOCK_FAILED]                         = -ENOLCK,
	[-VERR_NOT_IMPLEMENTED]                          = -ENOSYS,
	[-VERR_NOT_SUPPORTED]                            = -ENOSYS,
	[-VERR_DIR_NOT_EMPTY]                            = -ENOTEMPTY,
	[-VERR_TOO_MANY_SYMLINKS]                        = -ELOOP,
	[-VERR_NO_DATA]                                  = -ENODATA,
	[-VERR_NET_NO_NETWORK]                           = -ENONET,
	[-VERR_NET_NOT_UNIQUE_NAME]                      = -ENOTUNIQ,
	[-VERR_NO_TRANSLATION]                           = -EILSEQ,
	[-VERR_NET_NOT_SOCKET]                           = -ENOTSOCK,
	[-VERR_NET_DEST_ADDRESS_REQUIRED]                = -EDESTADDRREQ,
	[-VERR_NET_MSG_SIZE]                             = -EMSGSIZE,
	[-VERR_NET_PROTOCOL_TYPE]                        = -EPROTOTYPE,
	[-VERR_NET_PROTOCOL_NOT_AVAILABLE]               = -ENOPROTOOPT,
	[-VERR_NET_PROTOCOL_NOT_SUPPORTED]               = -EPROTONOSUPPORT,
	[-VERR_NET_SOCKET_TYPE_NOT_SUPPORTED]            = -ESOCKTNOSUPPORT,
	[-VERR_NET_OPERATION_NOT_SUPPORTED]              = -EOPNOTSUPP,
	[-VERR_NET_PROTOCOL_FAMILY_NOT_SUPPORTED]        = -EPFNOSUPPORT,
	[-VERR_NET_ADDRESS_FAMILY_NOT_SUPPORTED]         = -EAFNOSUPPORT,
	[-VERR_NET_ADDRESS_IN_USE]                       = -EADDRINUSE,
	[-VERR_NET_ADDRESS_NOT_AVAILABLE]                = -EADDRNOTAVAIL,
	[-VERR_NET_DOWN]                                 = -ENETDOWN,
	[-VERR_NET_UNREACHABLE]                          = -ENETUNREACH,
	[-VERR_NET_CONNECTION_RESET]                     = -ENETRESET,
	[-VERR_NET_CONNECTION_ABORTED]                   = -ECONNABORTED,
	[-VERR_NET_CONNECTION_RESET_BY_PEER]             = -ECONNRESET,
	[-VERR_NET_NO_BUFFER_SPACE]                      = -ENOBUFS,
	[-VERR_NET_ALREADY_CONNECTED]                    = -EISCONN,
	[-VERR_NET_NOT_CONNECTED]                        = -ENOTCONN,
	[-VERR_NET_SHUTDOWN]                             = -ESHUTDOWN,
	[-VERR_NET_TOO_MANY_REFERENCES]                  = -ETOOMANYREFS,
	[-VERR_TIMEOUT]                                  = -ETIMEDOUT,
	[-VERR_NET_CONNECTION_REFUSED]                   = -ECONNREFUSED,
	[-VERR_NET_HOST_DOWN]                            = -EHOSTDOWN,
	[-VERR_NET_HOST_UNREACHABLE]                     = -EHOSTUNREACH,
	[-VERR_NET_ALREADY_IN_PROGRESS]                  = -EALREADY,
	[-VERR_NET_IN_PROGRESS]                          = -EINPROGRESS,
	[-VERR_MEDIA_NOT_PRESENT]                        = -ENOMEDIUM,
	[-VERR_MEDIA_NOT_RECOGNIZED]                     = -EMEDIUMTYPE,
};
786
787int vbg_status_code_to_errno(int rc)
788{
789 if (rc >= 0)
790 return 0;
791
792 rc = -rc;
793 if (rc >= ARRAY_SIZE(vbg_status_code_to_errno_table) ||
794 vbg_status_code_to_errno_table[rc] == 0) {
795 vbg_warn("%s: Unhandled err %d\n", __func__, -rc);
796 return -EPROTO;
797 }
798
799 return vbg_status_code_to_errno_table[rc];
800}
801EXPORT_SYMBOL(vbg_status_code_to_errno);
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
new file mode 100644
index 000000000000..5e2ae978935d
--- /dev/null
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -0,0 +1,449 @@
1/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
2/*
3 * Virtual Device for Guest <-> VMM/Host communication interface
4 *
5 * Copyright (C) 2006-2016 Oracle Corporation
6 */
7
8#ifndef __VBOX_VMMDEV_H__
9#define __VBOX_VMMDEV_H__
10
11#include <asm/bitsperlong.h>
12#include <linux/sizes.h>
13#include <linux/types.h>
14#include <linux/vbox_vmmdev_types.h>
15
16/* Port for generic request interface (relative offset). */
17#define VMMDEV_PORT_OFF_REQUEST 0
18
/** Layout of VMMDEV RAM region that contains information for guest. */
struct vmmdev_memory {
	/** The size of this structure. */
	u32 size;
	/** The structure version. (VMMDEV_MEMORY_VERSION) */
	u32 version;

	union {
		struct {
			/** Flag telling that VMMDev has events pending. */
			u8 have_events;
			/** Explicit padding, MBZ (must be zero). */
			u8 padding[3];
		} V1_04;

		struct {
			/** Pending events flags, set by host. */
			u32 host_events;
			/** Mask of events the guest wants, set by guest. */
			u32 guest_event_mask;
		} V1_03;
	} V;

	/* struct vbva_memory, not used */
};
VMMDEV_ASSERT_SIZE(vmmdev_memory, 8 + 8);
45
46/** Version of vmmdev_memory structure (vmmdev_memory::version). */
47#define VMMDEV_MEMORY_VERSION (1)
48
49/* Host mouse capabilities has been changed. */
50#define VMMDEV_EVENT_MOUSE_CAPABILITIES_CHANGED BIT(0)
51/* HGCM event. */
52#define VMMDEV_EVENT_HGCM BIT(1)
53/* A display change request has been issued. */
54#define VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST BIT(2)
55/* Credentials are available for judgement. */
56#define VMMDEV_EVENT_JUDGE_CREDENTIALS BIT(3)
57/* The guest has been restored. */
58#define VMMDEV_EVENT_RESTORED BIT(4)
59/* Seamless mode state changed. */
60#define VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST BIT(5)
61/* Memory balloon size changed. */
62#define VMMDEV_EVENT_BALLOON_CHANGE_REQUEST BIT(6)
63/* Statistics interval changed. */
64#define VMMDEV_EVENT_STATISTICS_INTERVAL_CHANGE_REQUEST BIT(7)
65/* VRDP status changed. */
66#define VMMDEV_EVENT_VRDP BIT(8)
67/* New mouse position data available. */
68#define VMMDEV_EVENT_MOUSE_POSITION_CHANGED BIT(9)
69/* CPU hotplug event occurred. */
70#define VMMDEV_EVENT_CPU_HOTPLUG BIT(10)
71/* The mask of valid events, for sanity checking. */
72#define VMMDEV_EVENT_VALID_EVENT_MASK 0x000007ffU
73
74/*
75 * Additions are allowed to work only if additions_major == vmmdev_current &&
76 * additions_minor <= vmmdev_current. Additions version is reported to host
77 * (VMMDev) by VMMDEVREQ_REPORT_GUEST_INFO.
78 */
79#define VMMDEV_VERSION 0x00010004
80#define VMMDEV_VERSION_MAJOR (VMMDEV_VERSION >> 16)
81#define VMMDEV_VERSION_MINOR (VMMDEV_VERSION & 0xffff)
82
83/* Maximum request packet size. */
84#define VMMDEV_MAX_VMMDEVREQ_SIZE 1048576
85
86/* Version of vmmdev_request_header structure. */
87#define VMMDEV_REQUEST_HEADER_VERSION 0x10001
88
/** struct vmmdev_request_header - Generic VMMDev request header. */
struct vmmdev_request_header {
	/** IN: Size of the structure in bytes (including body). */
	u32 size;
	/** IN: Version of the structure (VMMDEV_REQUEST_HEADER_VERSION). */
	u32 version;
	/** IN: Type of the request. */
	enum vmmdev_request_type request_type;
	/** OUT: Return code (VBox status code, not an errno). */
	s32 rc;
	/** Reserved field no.1. MBZ (must be zero). */
	u32 reserved1;
	/** Reserved field no.2. MBZ (must be zero). */
	u32 reserved2;
};
VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
105
/**
 * struct vmmdev_mouse_status - Mouse status request structure.
 *
 * Used by VMMDEVREQ_GET_MOUSE_STATUS and VMMDEVREQ_SET_MOUSE_STATUS.
 */
struct vmmdev_mouse_status {
	/** Header. */
	struct vmmdev_request_header header;
	/** Mouse feature mask. See VMMDEV_MOUSE_*. */
	u32 mouse_features;
	/** Mouse x position. */
	s32 pointer_pos_x;
	/** Mouse y position. */
	s32 pointer_pos_y;
};
VMMDEV_ASSERT_SIZE(vmmdev_mouse_status, 24 + 12);
122
123/* The guest can (== wants to) handle absolute coordinates. */
124#define VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE BIT(0)
125/*
126 * The host can (== wants to) send absolute coordinates.
127 * (Input not captured.)
128 */
129#define VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE BIT(1)
130/*
131 * The guest can *NOT* switch to software cursor and therefore depends on the
132 * host cursor.
133 *
134 * When guest additions are installed and the host has promised to display the
135 * cursor itself, the guest installs a hardware mouse driver. Don't ask the
136 * guest to switch to a software cursor then.
137 */
138#define VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR BIT(2)
139/* The host does NOT provide support for drawing the cursor itself. */
140#define VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER BIT(3)
141/* The guest can read VMMDev events to find out about pointer movement */
142#define VMMDEV_MOUSE_NEW_PROTOCOL BIT(4)
143/*
144 * If the guest changes the status of the VMMDEV_MOUSE_GUEST_NEEDS_HOST_CURSOR
145 * bit, the host will honour this.
146 */
147#define VMMDEV_MOUSE_HOST_RECHECKS_NEEDS_HOST_CURSOR BIT(5)
148/*
149 * The host supplies an absolute pointing device. The Guest Additions may
150 * wish to use this to decide whether to install their own driver.
151 */
152#define VMMDEV_MOUSE_HOST_HAS_ABS_DEV BIT(6)
153
154/* The minimum value our pointing device can return. */
155#define VMMDEV_MOUSE_RANGE_MIN 0
156/* The maximum value our pointing device can return. */
157#define VMMDEV_MOUSE_RANGE_MAX 0xFFFF
158
/**
 * struct vmmdev_host_version - VirtualBox host version request structure.
 *
 * VBG uses this to detect the presence of new features in the interface.
 */
struct vmmdev_host_version {
	/** Header. */
	struct vmmdev_request_header header;
	/** Major version. */
	u16 major;
	/** Minor version. */
	u16 minor;
	/** Build number. */
	u32 build;
	/** SVN revision. */
	u32 revision;
	/** Feature mask (VMMDEV_HVF_*). */
	u32 features;
};
VMMDEV_ASSERT_SIZE(vmmdev_host_version, 24 + 16);
179
180/* Physical page lists are supported by HGCM. */
181#define VMMDEV_HVF_HGCM_PHYS_PAGE_LIST BIT(0)
182
/**
 * struct vmmdev_mask - Structure to set / clear bits in a mask used for
 * VMMDEVREQ_SET_GUEST_CAPABILITIES and VMMDEVREQ_CTL_GUEST_FILTER_MASK.
 */
struct vmmdev_mask {
	/** Header. */
	struct vmmdev_request_header header;
	/** Bits to set in the mask. */
	u32 or_mask;
	/** Bits to clear from the mask. */
	u32 not_mask;
};
VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8);
196
197/* The guest supports seamless display rendering. */
198#define VMMDEV_GUEST_SUPPORTS_SEAMLESS BIT(0)
199/* The guest supports mapping guest to host windows. */
200#define VMMDEV_GUEST_SUPPORTS_GUEST_HOST_WINDOW_MAPPING BIT(1)
201/*
202 * The guest graphical additions are active.
203 * Used for fast activation and deactivation of certain graphical operations
204 * (e.g. resizing & seamless). The legacy VMMDEVREQ_REPORT_GUEST_CAPABILITIES
205 * request sets this automatically, but VMMDEVREQ_SET_GUEST_CAPABILITIES does
206 * not.
207 */
208#define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2)
209
/** struct vmmdev_hypervisorinfo - Hypervisor info request structure. */
struct vmmdev_hypervisorinfo {
	/** Header. */
	struct vmmdev_request_header header;
	/**
	 * Guest virtual address of proposed hypervisor start.
	 * Not used by VMMDEVREQ_GET_HYPERVISOR_INFO.
	 */
	u32 hypervisor_start;
	/** Hypervisor size in bytes. */
	u32 hypervisor_size;
};
VMMDEV_ASSERT_SIZE(vmmdev_hypervisorinfo, 24 + 8);
223
/** struct vmmdev_events - Pending events structure. */
struct vmmdev_events {
	/** Header. */
	struct vmmdev_request_header header;
	/** OUT: Pending event mask, filled in by the host. */
	u32 events;
};
VMMDEV_ASSERT_SIZE(vmmdev_events, 24 + 4);
232
/* os_type value reported for a Linux 2.6 (or later) guest. */
#define VMMDEV_OSTYPE_LINUX26 0x53000
/* Bit set in the os_type value for 64-bit guest OS types. */
#define VMMDEV_OSTYPE_X64 BIT(8)

/** struct vmmdev_guest_info - Guest information report. */
struct vmmdev_guest_info {
	/** Header. */
	struct vmmdev_request_header header;
	/**
	 * The VMMDev interface version expected by additions.
	 * *Deprecated*, do not use anymore! Will be removed.
	 */
	u32 interface_version;
	/** Guest OS type, VMMDEV_OSTYPE_* value. */
	u32 os_type;
};
VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
249
/** struct vmmdev_guest_info2 - Guest information report, version 2. */
struct vmmdev_guest_info2 {
	/** Header. */
	struct vmmdev_request_header header;
	/** Major version. */
	u16 additions_major;
	/** Minor version. */
	u16 additions_minor;
	/** Build number. */
	u32 additions_build;
	/** SVN revision. */
	u32 additions_revision;
	/** Feature mask, currently unused. */
	u32 additions_features;
	/**
	 * The intentional meaning of this field was:
	 * Some additional information, for example 'Beta 1' or something like
	 * that.
	 *
	 * The way it was actually implemented: VBG_VERSION_STRING.
	 *
	 * This means the first three members are duplicated in this field (if
	 * the guest build config is sane). So, the user must check this and
	 * chop it off before usage. There is, because of the Main code's blind
	 * trust in the field's content, no way back.
	 */
	char name[128];
};
VMMDEV_ASSERT_SIZE(vmmdev_guest_info2, 24 + 144);
279
/**
 * enum vmmdev_guest_facility_type - Guest facility identifiers, used to
 * indicate which facility a struct vmmdev_guest_status report refers to.
 */
enum vmmdev_guest_facility_type {
	VBOXGUEST_FACILITY_TYPE_UNKNOWN = 0,
	VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER = 20,
	/* VBoxGINA / VBoxCredProv / pam_vbox. */
	VBOXGUEST_FACILITY_TYPE_AUTO_LOGON = 90,
	VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE = 100,
	/* VBoxTray (Windows), VBoxClient (Linux, Unix). */
	VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT = 101,
	VBOXGUEST_FACILITY_TYPE_SEAMLESS = 1000,
	VBOXGUEST_FACILITY_TYPE_GRAPHICS = 1100,
	VBOXGUEST_FACILITY_TYPE_ALL = 0x7ffffffe,
	/* Ensure the enum is a 32 bit data-type */
	VBOXGUEST_FACILITY_TYPE_SIZEHACK = 0x7fffffff
};
294
/**
 * enum vmmdev_guest_facility_status - Current status of a guest facility,
 * reported to the host through struct vmmdev_guest_status.
 */
enum vmmdev_guest_facility_status {
	VBOXGUEST_FACILITY_STATUS_INACTIVE = 0,
	VBOXGUEST_FACILITY_STATUS_PAUSED = 1,
	VBOXGUEST_FACILITY_STATUS_PRE_INIT = 20,
	VBOXGUEST_FACILITY_STATUS_INIT = 30,
	VBOXGUEST_FACILITY_STATUS_ACTIVE = 50,
	VBOXGUEST_FACILITY_STATUS_TERMINATING = 100,
	VBOXGUEST_FACILITY_STATUS_TERMINATED = 101,
	VBOXGUEST_FACILITY_STATUS_FAILED = 800,
	VBOXGUEST_FACILITY_STATUS_UNKNOWN = 999,
	/* Ensure the enum is a 32 bit data-type */
	VBOXGUEST_FACILITY_STATUS_SIZEHACK = 0x7fffffff
};
308
/** struct vmmdev_guest_status - Guest Additions status structure. */
struct vmmdev_guest_status {
	/** Facility the status is indicated for. */
	/** Header. */
	struct vmmdev_request_header header;
	/** Facility the status is indicated for. */
	enum vmmdev_guest_facility_type facility;
	/** Current guest status. */
	enum vmmdev_guest_facility_status status;
	/** Flags, not used at the moment. */
	u32 flags;
};
VMMDEV_ASSERT_SIZE(vmmdev_guest_status, 24 + 12);
321
/* Memory balloon chunks are 1MB in size, i.e. 256 4k pages each. */
#define VMMDEV_MEMORY_BALLOON_CHUNK_SIZE (1048576)
#define VMMDEV_MEMORY_BALLOON_CHUNK_PAGES (1048576 / 4096)

/** struct vmmdev_memballoon_info - Memory-balloon info structure. */
struct vmmdev_memballoon_info {
	/** Header. */
	struct vmmdev_request_header header;
	/** Balloon size in chunks (1 chunk == 1 megabyte). */
	u32 balloon_chunks;
	/** Guest ram size in chunks (1 chunk == 1 megabyte). */
	u32 phys_mem_chunks;
	/**
	 * Setting this to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST indicates that
	 * the request is a response to that event.
	 * (Don't confuse this with VMMDEVREQ_ACKNOWLEDGE_EVENTS.)
	 */
	u32 event_ack;
};
VMMDEV_ASSERT_SIZE(vmmdev_memballoon_info, 24 + 12);
341
/** struct vmmdev_memballoon_change - Change the size of the balloon. */
struct vmmdev_memballoon_change {
	/** Header. */
	struct vmmdev_request_header header;
	/** The number of pages in the array. */
	u32 pages;
	/** true = inflate, false = deflate. */
	u32 inflate;
	/** Physical address (u64) of each page, up to one full 1MB chunk. */
	u64 phys_page[VMMDEV_MEMORY_BALLOON_CHUNK_PAGES];
};
353
/** struct vmmdev_write_core_dump - Write Core Dump request data. */
struct vmmdev_write_core_dump {
	/** Header. */
	struct vmmdev_request_header header;
	/** Flags (reserved, MBZ). */
	u32 flags;
};
VMMDEV_ASSERT_SIZE(vmmdev_write_core_dump, 24 + 4);
362
/**
 * struct vmmdev_heartbeat - Heart beat check state structure.
 *
 * __packed keeps the total size at the asserted 36 bytes (24 + 12) instead
 * of the compiler padding it up to a multiple of the u64 member's alignment.
 */
struct vmmdev_heartbeat {
	/** Header. */
	struct vmmdev_request_header header;
	/** OUT: Guest heartbeat interval in nanosec. */
	u64 interval_ns;
	/** Heartbeat check flag. */
	u8 enabled;
	/** Explicit padding, MBZ. */
	u8 padding[3];
} __packed;
VMMDEV_ASSERT_SIZE(vmmdev_heartbeat, 24 + 12);
375
/* Flags for vmmdev_hgcmreq_header.flags below. */
/* The request has been processed to completion. */
#define VMMDEV_HGCM_REQ_DONE BIT(0)
/* The request has been cancelled. */
#define VMMDEV_HGCM_REQ_CANCELLED BIT(1)

/** struct vmmdev_hgcmreq_header - vmmdev HGCM requests header. */
struct vmmdev_hgcmreq_header {
	/** Request header. */
	struct vmmdev_request_header header;

	/** HGCM flags, VMMDEV_HGCM_REQ_* bits. */
	u32 flags;

	/** Result code. */
	s32 result;
};
VMMDEV_ASSERT_SIZE(vmmdev_hgcmreq_header, 24 + 8);
391
/** struct vmmdev_hgcm_connect - HGCM connect request structure. */
struct vmmdev_hgcm_connect {
	/** HGCM request header. */
	struct vmmdev_hgcmreq_header header;

	/** IN: Description of service to connect to. */
	struct vmmdev_hgcm_service_location loc;

	/** OUT: Client identifier assigned by local instance of HGCM. */
	u32 client_id;
};
VMMDEV_ASSERT_SIZE(vmmdev_hgcm_connect, 32 + 132 + 4);
404
/** struct vmmdev_hgcm_disconnect - HGCM disconnect request structure. */
struct vmmdev_hgcm_disconnect {
	/** HGCM request header. */
	struct vmmdev_hgcmreq_header header;

	/** IN: Client identifier (as returned by the connect request). */
	u32 client_id;
};
VMMDEV_ASSERT_SIZE(vmmdev_hgcm_disconnect, 32 + 4);
414
/* Maximum number of parameters accepted for a single HGCM call. */
#define VMMDEV_HGCM_MAX_PARMS 32

/** struct vmmdev_hgcm_call - HGCM call request structure. */
struct vmmdev_hgcm_call {
	/** HGCM request header. */
	struct vmmdev_hgcmreq_header header;

	/** IN: Client identifier. */
	u32 client_id;
	/** IN: Service function number. */
	u32 function;
	/** IN: Number of parameters (at most VMMDEV_HGCM_MAX_PARMS). */
	u32 parm_count;
	/** Parameters follow in form: HGCMFunctionParameter32|64 parms[X]; */
};
VMMDEV_ASSERT_SIZE(vmmdev_hgcm_call, 32 + 12);
431
432/**
433 * struct vmmdev_hgcm_cancel2 - HGCM cancel request structure, version 2.
434 *
435 * After the request header.rc will be:
436 *
437 * VINF_SUCCESS when cancelled.
438 * VERR_NOT_FOUND if the specified request cannot be found.
439 * VERR_INVALID_PARAMETER if the address is invalid valid.
440 */
441struct vmmdev_hgcm_cancel2 {
442 /** Header. */
443 struct vmmdev_request_header header;
444 /** The physical address of the request to cancel. */
445 u32 phys_req_to_cancel;
446};
447VMMDEV_ASSERT_SIZE(vmmdev_hgcm_cancel2, 24 + 4);
448
449#endif