author    Linus Torvalds <torvalds@linux-foundation.org>  2019-03-29 18:03:30 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-03-29 18:03:30 -0400
commit    6f51092301a42105c5cfcb96033e8734aa1bf160 (patch)
tree      5fc94c02ec335e6d8ca400211e6e03ff70b27339
parent    3467b90737e1551dbaa5b71fd5a54425fd4a72b2 (diff)
parent    0532a1b0d045115521a93acf28f1270df89ad806 (diff)
Merge tag 'char-misc-5.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver fixes from Greg KH:
 "Here are some binder, habanalabs, and vboxguest driver fixes for
  5.1-rc3.

  The binder fixes resolve some reported issues found by testing, first
  by the selinux developers, and then earlier today by syzbot.

  The habanalabs fixes are all minor, resolving a number of tiny things.

  The vboxguest patches are a bit larger. They resolve the fact that
  VirtualBox decided to change their API in their latest release in a
  way that broke the existing kernel code, despite saying that they were
  never going to do that. So this is a bit of a "new feature", but it is
  good to get merged so that 5.1 will work with the latest release. The
  changes are not large, and of course VirtualBox "swears" they will not
  break this again, but no one is holding their breath here.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-5.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc:
  virt: vbox: Implement passing requestor info to the host for VirtualBox 6.0.x
  binder: fix race between munmap() and direct reclaim
  binder: fix BUG_ON found by selinux-testsuite
  habanalabs: cast to expected type
  habanalabs: prevent host crash during suspend/resume
  habanalabs: perform accounting for active CS
  habanalabs: fix mapping with page size bigger than 4KB
  habanalabs: complete user context cleanup before hard reset
  habanalabs: fix bug when mapping very large memory area
  habanalabs: fix MMU number of pages calculation
-rw-r--r--  drivers/android/binder.c                        3
-rw-r--r--  drivers/android/binder_alloc.c                 18
-rw-r--r--  drivers/misc/habanalabs/command_submission.c    6
-rw-r--r--  drivers/misc/habanalabs/debugfs.c               7
-rw-r--r--  drivers/misc/habanalabs/device.c               71
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c            65
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h           21
-rw-r--r--  drivers/misc/habanalabs/hw_queue.c              5
-rw-r--r--  drivers/misc/habanalabs/memory.c               38
-rw-r--r--  drivers/misc/habanalabs/mmu.c                   6
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.c       106
-rw-r--r--  drivers/virt/vboxguest/vboxguest_core.h        15
-rw-r--r--  drivers/virt/vboxguest/vboxguest_linux.c       26
-rw-r--r--  drivers/virt/vboxguest/vboxguest_utils.c       32
-rw-r--r--  drivers/virt/vboxguest/vboxguest_version.h      9
-rw-r--r--  drivers/virt/vboxguest/vmmdev.h                 8
-rw-r--r--  include/linux/vbox_utils.h                     12
-rw-r--r--  include/uapi/linux/vbox_vmmdev_types.h         60
18 files changed, 324 insertions, 184 deletions
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8685882da64c..4b9c7ca492e6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
 	size_t object_size = 0;
 
 	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-	if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+	if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+	    !IS_ALIGNED(offset, sizeof(u32)))
 		return 0;
 	binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
 				      offset, read_size);
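A standalone user-space sketch (illustrative values, not the kernel code) of why the added `offset > buffer->data_size` guard must come first: with unsigned size_t arithmetic, `data_size - offset` wraps when offset is out of range, so a later "is there enough room?" test passes by accident.

	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		size_t data_size = 128;
		size_t offset = 200;              /* out-of-range offset from userspace */
		size_t room = data_size - offset; /* wraps to a huge value */

		if (room >= 8)                    /* naive room check happily succeeds */
			printf("would copy from invalid offset %zu (room=%zu)\n",
			       offset, room);

		if (offset > data_size)           /* the added guard catches it */
			printf("rejected: offset beyond buffer\n");
		return 0;
	}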
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6389467670a0..195f120c4e8c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+	mm = alloc->vma_vm_mm;
+	if (!mmget_not_zero(mm))
+		goto err_mmget;
+	if (!down_write_trylock(&mm->mmap_sem))
+		goto err_down_write_mmap_sem_failed;
 	vma = binder_alloc_get_vma(alloc);
-	if (vma) {
-		if (!mmget_not_zero(alloc->vma_vm_mm))
-			goto err_mmget;
-		mm = alloc->vma_vm_mm;
-		if (!down_read_trylock(&mm->mmap_sem))
-			goto err_down_write_mmap_sem_failed;
-	}
 
 	list_lru_isolate(lru, item);
 	spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		zap_page_range(vma, page_addr, PAGE_SIZE);
 
 		trace_binder_unmap_user_end(alloc, index);
-
-		up_read(&mm->mmap_sem);
-		mmput(mm);
 	}
+	up_write(&mm->mmap_sem);
+	mmput(mm);
 
 	trace_binder_unmap_kernel_start(alloc, index);
 
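A loose user-space analogy of the locking order this fix establishes (pthread rwlock standing in for mmap_sem; not the kernel code): take the mapping lock *before* looking up the object it protects, so the lookup result cannot be invalidated by a concurrent unmap in between.

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
	static void *mapping;                       /* stands in for the vma */

	static void *lookup_mapping(void) { return mapping; }

	static int reclaim_one_page(void)
	{
		void *vma;

		if (pthread_rwlock_trywrlock(&map_lock)) /* like down_write_trylock() */
			return -1;                       /* busy: skip, try again later */

		vma = lookup_mapping();  /* safe: unmap also needs the write lock */
		if (vma) {
			/* ... zap the page ... */
		}
		pthread_rwlock_unlock(&map_lock);
		return 0;
	}

	int main(void)
	{
		reclaim_one_page();
		return 0;
	}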
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 3525236ed8d9..19c84214a7ea 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
 
 	/* We also need to update CI for internal queues */
 	if (cs->submitted) {
+		int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+
+		WARN_ONCE((cs_cnt < 0),
+			"hl%d: error in CS active cnt %d\n",
+			hdev->id, cs_cnt);
+
 		hl_int_hw_queue_update_ci(cs);
 
 		spin_lock(&hdev->hw_queues_mirror_lock);
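The accounting pattern here (increment on submit in hw_queue.c below, decrement-and-check on release) can be sketched in user space with C11 atomics; names are illustrative:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int cs_active_cnt;

	static void cs_get(void) { atomic_fetch_add(&cs_active_cnt, 1); }

	static void cs_put(void)
	{
		/* fetch_sub returns the old value; -1 mimics atomic_dec_return() */
		int cnt = atomic_fetch_sub(&cs_active_cnt, 1) - 1;

		if (cnt < 0)
			fprintf(stderr, "error in CS active cnt %d\n", cnt);
	}

	int main(void)
	{
		cs_get();
		cs_put();
		cs_put();        /* unbalanced put: triggers the warning */
		return 0;
	}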
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a53c12aff6ad..974a87789bd8 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
 	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
 	enum vm_type_t *vm_type;
 	bool once = true;
+	u64 j;
 	int i;
 
 	if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
 		} else {
 			phys_pg_pack = hnode->ptr;
 			seq_printf(s,
-				" 0x%-14llx %-10u %-4u\n",
+				" 0x%-14llx %-10llu %-4u\n",
 				hnode->vaddr, phys_pg_pack->total_size,
 				phys_pg_pack->handle);
 		}
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
 				phys_pg_pack->page_size);
 			seq_puts(s, " physical address\n");
 			seq_puts(s, "---------------------\n");
-			for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
 				seq_printf(s, " 0x%-14llx\n",
-					phys_pg_pack->pages[i]);
+					phys_pg_pack->pages[j]);
 			}
 		}
 		spin_unlock(&vm->idr_lock);
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index de46aa6ed154..77d51be66c7e 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -11,6 +11,8 @@
 #include <linux/sched/signal.h>
 #include <linux/hwmon.h>
 
+#define HL_PLDM_PENDING_RESET_PER_SEC	(HL_PENDING_RESET_PER_SEC * 10)
+
 bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
 {
 	if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
 	spin_lock_init(&hdev->hw_queues_mirror_lock);
 	atomic_set(&hdev->in_reset, 0);
 	atomic_set(&hdev->fd_open_cnt, 0);
+	atomic_set(&hdev->cs_active_cnt, 0);
 
 	return 0;
 
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
 
 	pci_save_state(hdev->pdev);
 
+	/* Block future CS/VM/JOB completion operations */
+	rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+	if (rc) {
+		dev_err(hdev->dev, "Can't suspend while in reset\n");
+		return -EIO;
+	}
+
+	/* This blocks all other stuff that is not blocked by in_reset */
+	hdev->disabled = true;
+
+	/*
+	 * Flush anyone that is inside the critical section of enqueue
+	 * jobs to the H/W
+	 */
+	hdev->asic_funcs->hw_queues_lock(hdev);
+	hdev->asic_funcs->hw_queues_unlock(hdev);
+
+	/* Flush processes that are sending message to CPU */
+	mutex_lock(&hdev->send_cpu_message_lock);
+	mutex_unlock(&hdev->send_cpu_message_lock);
+
 	rc = hdev->asic_funcs->suspend(hdev);
 	if (rc)
 		dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
 
 	pci_set_power_state(hdev->pdev, PCI_D0);
 	pci_restore_state(hdev->pdev);
-	rc = pci_enable_device(hdev->pdev);
+	rc = pci_enable_device_mem(hdev->pdev);
 	if (rc) {
 		dev_err(hdev->dev,
 			"Failed to enable PCI device in resume\n");
 		return rc;
 	}
 
+	pci_set_master(hdev->pdev);
+
 	rc = hdev->asic_funcs->resume(hdev);
 	if (rc) {
-		dev_err(hdev->dev,
-			"Failed to enable PCI access from device CPU\n");
-		return rc;
+		dev_err(hdev->dev, "Failed to resume device after suspend\n");
+		goto disable_device;
+	}
+
+
+	hdev->disabled = false;
+	atomic_set(&hdev->in_reset, 0);
+
+	rc = hl_device_reset(hdev, true, false);
+	if (rc) {
+		dev_err(hdev->dev, "Failed to reset device during resume\n");
+		goto disable_device;
 	}
 
 	return 0;
+
+disable_device:
+	pci_clear_master(hdev->pdev);
+	pci_disable_device(hdev->pdev);
+
+	return rc;
 }
 
 static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
 	struct hl_device_reset_work *device_reset_work =
 		container_of(work, struct hl_device_reset_work, reset_work);
 	struct hl_device *hdev = device_reset_work->hdev;
-	u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+	u16 pending_total, pending_cnt;
 	struct task_struct *task = NULL;
 
+	if (hdev->pldm)
+		pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
+	else
+		pending_total = HL_PENDING_RESET_PER_SEC;
+
+	pending_cnt = pending_total;
+
 	/* Flush all processes that are inside hl_open */
 	mutex_lock(&hdev->fd_open_cnt_lock);
 
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
 		}
 	}
 
+	pending_cnt = pending_total;
+
+	while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+		pending_cnt--;
+
+		ssleep(1);
+	}
+
+	if (atomic_read(&hdev->fd_open_cnt))
+		dev_crit(hdev->dev,
+			"Going to hard reset with open user contexts\n");
+
 	mutex_unlock(&hdev->fd_open_cnt_lock);
 
 	hl_device_reset(hdev, true, true);
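The suspend path above uses a "flush by lock/unlock" idiom: after setting the disabled flag, taking and immediately releasing the enqueue lock guarantees any thread that saw the flag clear and was already inside the critical section has finished. A user-space sketch with pthreads (illustrative, mirroring the idiom rather than the driver):

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool disabled;

	static void enqueue_job(void)
	{
		pthread_mutex_lock(&queue_lock);
		if (!disabled) {
			/* ... write the job to the hardware queue ... */
		}
		pthread_mutex_unlock(&queue_lock);
	}

	static void suspend_flush(void)
	{
		disabled = true;                   /* new enqueuers bail out      */
		pthread_mutex_lock(&queue_lock);   /* wait for stragglers...      */
		pthread_mutex_unlock(&queue_lock); /* ...then release immediately */
	}

	int main(void)
	{
		enqueue_job();
		suspend_flush();
		return 0;
	}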
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 238dd57c541b..ea979ebd62fb 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
 	return retval;
 }
 
-static void goya_resume_external_queues(struct hl_device *hdev)
-{
-	WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
-	WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
-	WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
-	WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
-	WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
-}
-
 /*
  * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
  *
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
 	return retval;
 }
 
-static void goya_resume_internal_queues(struct hl_device *hdev)
-{
-	WREG32(mmMME_QM_GLBL_CFG1, 0);
-	WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC0_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC1_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC2_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC3_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC4_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC5_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC6_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
-
-	WREG32(mmTPC7_QM_GLBL_CFG1, 0);
-	WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
-}
-
 static void goya_dma_stall(struct hl_device *hdev)
 {
 	WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
 {
 	int rc;
 
-	rc = goya_stop_internal_queues(hdev);
-
-	if (rc) {
-		dev_err(hdev->dev, "failed to stop internal queues\n");
-		return rc;
-	}
-
-	rc = goya_stop_external_queues(hdev);
-
-	if (rc) {
-		dev_err(hdev->dev, "failed to stop external queues\n");
-		return rc;
-	}
-
 	rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
 	if (rc)
 		dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
 
 int goya_resume(struct hl_device *hdev)
 {
-	int rc;
-
-	goya_resume_external_queues(hdev);
-	goya_resume_internal_queues(hdev);
-
-	rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
-	if (rc)
-		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
-	return rc;
+	return goya_init_iatu(hdev);
 }
 
 static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
 
 	*dma_handle = hdev->asic_prop.sram_base_address;
 
-	base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+	base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
 
 	switch (queue_id) {
 	case GOYA_QUEUE_ID_MME:
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index a7c95e9f9b9a..a8ee52c880cd 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
  * struct hl_vm_phys_pg_pack - physical page pack.
  * @vm_type: describes the type of the virtual area descriptor.
  * @pages: the physical page array.
+ * @npages: num physical pages in the pack.
+ * @total_size: total size of all the pages in this list.
  * @mapping_cnt: number of shared mappings.
  * @asid: the context related to this list.
- * @npages: num physical pages in the pack.
  * @page_size: size of each page in the pack.
- * @total_size: total size of all the pages in this list.
  * @flags: HL_MEM_* flags related to this list.
  * @handle: the provided handle related to this list.
  * @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
 struct hl_vm_phys_pg_pack {
 	enum vm_type_t vm_type; /* must be first */
 	u64 *pages;
+	u64 npages;
+	u64 total_size;
 	atomic_t mapping_cnt;
 	u32 asid;
-	u32 npages;
 	u32 page_size;
-	u32 total_size;
 	u32 flags;
 	u32 handle;
 	u32 offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
  * @cb_pool_lock: protects the CB pool.
  * @user_ctx: current user context executing.
  * @dram_used_mem: current DRAM memory consumption.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
  * @timeout_jiffies: device CS timeout value.
  * @max_power: the max power of the device, as configured by the sysadmin. This
  *             value is saved so in case of hard-reset, KMD will restore this
  *             value and update the F/W after the re-initialization
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @cs_active_cnt: number of active command submissions on this device (active
+ *                 means already in H/W queues)
  * @major: habanalabs KMD major.
  * @high_pll: high PLL profile frequency.
  * @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
 	struct hl_ctx *user_ctx;
 
 	atomic64_t dram_used_mem;
+	u64 timeout_jiffies;
+	u64 max_power;
 	atomic_t in_reset;
 	atomic_t curr_pll_profile;
 	atomic_t fd_open_cnt;
-	u64 timeout_jiffies;
-	u64 max_power;
+	atomic_t cs_active_cnt;
 	u32 major;
 	u32 high_pll;
 	u32 soft_reset_cnt;
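A side note on the reordering above: grouping the 8-byte members ahead of the 4-byte ones also avoids alignment padding. A standalone sketch (made-up struct names; the driver change itself is about widening npages/total_size so huge allocations don't truncate):

	#include <stdio.h>

	struct interleaved { unsigned a; unsigned long long x;
			     unsigned b; unsigned long long y; };
	struct grouped     { unsigned long long x, y; unsigned a, b; };

	int main(void)
	{
		/* typically prints 32 vs 24 on LP64 targets */
		printf("interleaved: %zu bytes, grouped: %zu bytes\n",
		       sizeof(struct interleaved), sizeof(struct grouped));
		return 0;
	}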
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 67bece26417c..ef3bb6951360 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
 		spin_unlock(&hdev->hw_queues_mirror_lock);
 	}
 
-	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+	atomic_inc(&hdev->cs_active_cnt);
+
+	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
 		if (job->ext_queue)
 			ext_hw_queue_schedule_job(job);
 		else
 			int_hw_queue_schedule_job(job);
-	}
 
 	cs->submitted = true;
 
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 3a12fd1a5274..ce1fda40a8b8 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 	struct hl_device *hdev = ctx->hdev;
 	struct hl_vm *vm = &hdev->vm;
 	struct hl_vm_phys_pg_pack *phys_pg_pack;
-	u64 paddr = 0;
-	u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
-	int handle, rc, i;
+	u64 paddr = 0, total_size, num_pgs, i;
+	u32 num_curr_pgs, page_size, page_shift;
+	int handle, rc;
 	bool contiguous;
 
 	num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 		paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
 		if (!paddr) {
 			dev_err(hdev->dev,
-				"failed to allocate %u huge contiguous pages\n",
+				"failed to allocate %llu huge contiguous pages\n",
 				num_pgs);
 			return -ENOMEM;
 		}
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 	phys_pg_pack->flags = args->flags;
 	phys_pg_pack->contiguous = contiguous;
 
-	phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
 	if (!phys_pg_pack->pages) {
 		rc = -ENOMEM;
 		goto pages_arr_err;
@@ -148,7 +148,7 @@ page_err:
 		gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
 				page_size);
 
-	kfree(phys_pg_pack->pages);
+	kvfree(phys_pg_pack->pages);
 pages_arr_err:
 	kfree(phys_pg_pack);
 pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
 		struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
 	struct hl_vm *vm = &hdev->vm;
-	int i;
+	u64 i;
 
 	if (!phys_pg_pack->created_from_userptr) {
 		if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
 		}
 	}
 
-	kfree(phys_pg_pack->pages);
+	kvfree(phys_pg_pack->pages);
 	kfree(phys_pg_pack);
 }
 
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
  * - Return the start address of the virtual block
  */
 static u64 get_va_block(struct hl_device *hdev,
-		struct hl_va_range *va_range, u32 size, u64 hint_addr,
+		struct hl_va_range *va_range, u64 size, u64 hint_addr,
 		bool is_userptr)
 {
 	struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
 	}
 
 	if (!new_va_block) {
-		dev_err(hdev->dev, "no available va block for size %u\n", size);
+		dev_err(hdev->dev, "no available va block for size %llu\n",
+			size);
 		goto out;
 	}
 
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 	struct hl_vm_phys_pg_pack *phys_pg_pack;
 	struct scatterlist *sg;
 	dma_addr_t dma_addr;
-	u64 page_mask;
-	u32 npages, total_npages, page_size = PAGE_SIZE;
+	u64 page_mask, total_npages;
+	u32 npages, page_size = PAGE_SIZE;
 	bool first = true, is_huge_page_opt = true;
 	int rc, i, j;
 
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 
 	page_mask = ~(((u64) page_size) - 1);
 
-	phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
+						GFP_KERNEL);
 	if (!phys_pg_pack->pages) {
 		rc = -ENOMEM;
 		goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
 		struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
 	struct hl_device *hdev = ctx->hdev;
-	u64 next_vaddr = vaddr, paddr;
+	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
 	u32 page_size = phys_pg_pack->page_size;
-	int i, rc = 0, mapped_pg_cnt = 0;
+	int rc = 0;
 
 	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
 		paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
 		rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
 		if (rc) {
 			dev_err(hdev->dev,
-				"map failed for handle %u, npages: %d, mapped: %d",
+				"map failed for handle %u, npages: %llu, mapped: %llu",
 				phys_pg_pack->handle, phys_pg_pack->npages,
 				mapped_pg_cnt);
 			goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
 	struct hl_vm_hash_node *hnode = NULL;
 	struct hl_userptr *userptr = NULL;
 	enum vm_type_t *vm_type;
-	u64 next_vaddr;
+	u64 next_vaddr, i;
 	u32 page_size;
 	bool is_userptr;
-	int i, rc;
+	int rc;
 
 	/* protect from double entrance */
 	mutex_lock(&ctx->mem_hash_lock);
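The u32-to-u64 widenings (and matching %u-to-%llu format changes) throughout this file all address the "very large memory area" bug: a page count times a page size can exceed 32 bits, so a u32 total silently truncates. A standalone sketch with illustrative numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t num_pgs = 3 * 1024;           /* 3K huge pages...   */
		uint32_t page_size = 2 * 1024 * 1024;  /* ...of 2MB each = 6GB */

		uint32_t total32 = (uint32_t)(num_pgs * page_size); /* truncates */
		uint64_t total64 = num_pgs * page_size;

		/* prints 2147483648 (2GB) vs 6442450944 (6GB) */
		printf("u32: %u bytes, u64: %llu bytes\n",
		       total32, (unsigned long long)total64);
		return 0;
	}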
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 2f2e99cb2743..3a5a2cec8305 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -832,7 +832,7 @@ err:
 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 {
 	struct hl_device *hdev = ctx->hdev;
-	u64 real_virt_addr;
+	u64 real_virt_addr, real_phys_addr;
 	u32 real_page_size, npages;
 	int i, rc, mapped_cnt = 0;
 
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 
 	npages = page_size / real_page_size;
 	real_virt_addr = virt_addr;
+	real_phys_addr = phys_addr;
 
 	for (i = 0 ; i < npages ; i++) {
-		rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+		rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
 				real_page_size);
 		if (rc)
 			goto err;
 
 		real_virt_addr += real_page_size;
+		real_phys_addr += real_page_size;
 		mapped_cnt++;
 	}
 
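The bug fixed here: when one caller-visible page is mapped as several smaller device pages, only the virtual cursor advanced, so every device page mapped to the same physical address. A standalone sketch of the corrected loop (illustrative constants, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t virt = 0x1000000, phys = 0x8000000;
		uint32_t page_size = 64 * 1024;        /* caller's page      */
		uint32_t real_page_size = 4 * 1024;    /* device MMU page    */
		uint32_t npages = page_size / real_page_size;

		for (uint32_t i = 0; i < npages; i++) {
			printf("map virt 0x%llx -> phys 0x%llx\n",
			       (unsigned long long)virt,
			       (unsigned long long)phys);
			virt += real_page_size;
			phys += real_page_size;  /* the line the fix adds */
		}
		return 0;
	}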
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index df7d09409efe..8ca333f21292 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -27,6 +27,10 @@
 
 #define GUEST_MAPPINGS_TRIES	5
 
+#define VBG_KERNEL_REQUEST \
+	(VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
+	 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
+
 /**
  * Reserves memory in which the VMM can relocate any guest mappings
  * that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
 	int i, rc;
 
 	/* Query the required space. */
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return;
 
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
 	 * Tell the host that we're going to free the memory we reserved for
 	 * it, the free it up. (Leak the memory if anything goes wrong here.)
 	 */
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return;
 
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
 	struct vmmdev_guest_info2 *req2 = NULL;
 	int rc, ret = -ENOMEM;
 
-	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
-	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+	req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
+			     VBG_KERNEL_REQUEST);
+	req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
+			     VBG_KERNEL_REQUEST);
 	if (!req1 || !req2)
 		goto out_free;
 
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
 	req2->additions_minor = VBG_VERSION_MINOR;
 	req2->additions_build = VBG_VERSION_BUILD;
 	req2->additions_revision = VBG_SVN_REV;
-	/* (no features defined yet) */
-	req2->additions_features = 0;
+	req2->additions_features =
+		VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
 	strlcpy(req2->name, VBG_VERSION_STRING,
 		sizeof(req2->name));
 
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
 	struct vmmdev_guest_status *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
 	struct vmmdev_heartbeat *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
 
 	gdev->guest_heartbeat_req = vbg_req_alloc(
 					sizeof(*gdev->guest_heartbeat_req),
-					VMMDEVREQ_GUEST_HEARTBEAT);
+					VMMDEVREQ_GUEST_HEARTBEAT,
+					VBG_KERNEL_REQUEST);
 	if (!gdev->guest_heartbeat_req)
 		return -ENOMEM;
 
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
 	struct vmmdev_mask *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
 	u32 changed, previous;
 	int rc, ret = 0;
 
-	/* Allocate a request buffer before taking the spinlock */
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+	/*
+	 * Allocate a request buffer before taking the spinlock, when
+	 * the session is being terminated the requestor is the kernel,
+	 * as we're cleaning up.
+	 */
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+			    session_termination ? VBG_KERNEL_REQUEST :
+						  session->requestor);
 	if (!req) {
 		if (!session_termination)
 			return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
 	struct vmmdev_mask *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
 	u32 changed, previous;
 	int rc, ret = 0;
 
-	/* Allocate a request buffer before taking the spinlock */
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+	/*
+	 * Allocate a request buffer before taking the spinlock, when
+	 * the session is being terminated the requestor is the kernel,
+	 * as we're cleaning up.
+	 */
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+			    session_termination ? VBG_KERNEL_REQUEST :
+						  session->requestor);
 	if (!req) {
 		if (!session_termination)
 			return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
 	struct vmmdev_host_version *req;
 	int rc, ret;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
 
 	gdev->mem_balloon.get_req =
 		vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
-			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+			      VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
+			      VBG_KERNEL_REQUEST);
 	gdev->mem_balloon.change_req =
 		vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
-			      VMMDEVREQ_CHANGE_MEMBALLOON);
+			      VMMDEVREQ_CHANGE_MEMBALLOON,
+			      VBG_KERNEL_REQUEST);
 	gdev->cancel_req =
 		vbg_req_alloc(sizeof(*(gdev->cancel_req)),
-			      VMMDEVREQ_HGCM_CANCEL2);
+			      VMMDEVREQ_HGCM_CANCEL2,
+			      VBG_KERNEL_REQUEST);
 	gdev->ack_events_req =
 		vbg_req_alloc(sizeof(*gdev->ack_events_req),
-			      VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+			      VMMDEVREQ_ACKNOWLEDGE_EVENTS,
+			      VBG_KERNEL_REQUEST);
 	gdev->mouse_status_req =
 		vbg_req_alloc(sizeof(*gdev->mouse_status_req),
-			      VMMDEVREQ_GET_MOUSE_STATUS);
+			      VMMDEVREQ_GET_MOUSE_STATUS,
+			      VBG_KERNEL_REQUEST);
 
 	if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
 	    !gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
  * vboxguest_linux.c calls this when userspace opens the char-device.
  * Return: A pointer to the new session or an ERR_PTR on error.
  * @gdev:	The Guest extension device.
- * @user:	Set if this is a session for the vboxuser device.
+ * @requestor:	VMMDEV_REQUESTOR_* flags
  */
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
 {
 	struct vbg_session *session;
 
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
 		return ERR_PTR(-ENOMEM);
 
 	session->gdev = gdev;
-	session->user_session = user;
+	session->requestor = requestor;
 
 	return session;
 }
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
 		if (!session->hgcm_client_ids[i])
 			continue;
 
-		vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+		/* requestor is kernel here, as we're cleaning up. */
+		vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
+				    session->hgcm_client_ids[i], &rc);
 	}
 
 	kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
 		return -EPERM;
 	}
 
-	if (trusted_apps_only && session->user_session) {
+	if (trusted_apps_only &&
+	    (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
 		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
 			req->request_type);
 		return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
 	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
 		return -EMFILE;
 
-	ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
-			       &conn->hdr.rc);
+	ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
+			       &client_id, &conn->hdr.rc);
 
 	mutex_lock(&gdev->session_mutex);
 	if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
 	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
 		return -EINVAL;
 
-	ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+	ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
+				  &disconn->hdr.rc);
 
 	mutex_lock(&gdev->session_mutex);
 	if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
 	}
 
 	if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
-		ret = vbg_hgcm_call32(gdev, client_id,
+		ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
 				      call->function, call->timeout_ms,
 				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
 				      call->parm_count, &call->hdr.rc);
 	else
-		ret = vbg_hgcm_call(gdev, client_id,
+		ret = vbg_hgcm_call(gdev, session->requestor, client_id,
 				    call->function, call->timeout_ms,
 				    VBG_IOCTL_HGCM_CALL_PARMS(call),
 				    call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
 }
 
 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+				     struct vbg_session *session,
 				     struct vbg_ioctl_write_coredump *dump)
 {
 	struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
 	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
 		return -EINVAL;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
+			    session->requestor);
 	if (!req)
 		return -ENOMEM;
 
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
 	case VBG_IOCTL_CHECK_BALLOON:
 		return vbg_ioctl_check_balloon(gdev, data);
 	case VBG_IOCTL_WRITE_CORE_DUMP:
-		return vbg_ioctl_write_core_dump(gdev, data);
+		return vbg_ioctl_write_core_dump(gdev, session, data);
 	}
 
 	/* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
 	struct vmmdev_mouse_status *req;
 	int rc;
 
-	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
+			    VBG_KERNEL_REQUEST);
 	if (!req)
 		return -ENOMEM;
 
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 7ad9ec45bfa9..4188c12b839f 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -154,15 +154,15 @@ struct vbg_session {
 	 * host. Protected by vbg_gdev.session_mutex.
 	 */
 	u32 guest_caps;
-	/** Does this session belong to a root process or a user one? */
-	bool user_session;
+	/** VMMDEV_REQUESTOR_* flags */
+	u32 requestor;
 	/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
 	bool cancel_waiters;
 };
 
 int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
 void vbg_core_exit(struct vbg_dev *gdev);
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
 void vbg_core_close_session(struct vbg_session *session);
 int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
 int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
 void vbg_linux_mouse_event(struct vbg_dev *gdev);
 
 /* Private (non exported) functions form vboxguest_utils.c */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+		    u32 requestor);
 void vbg_req_free(void *req, size_t len);
 int vbg_req_perform(struct vbg_dev *gdev, void *req);
 int vbg_hgcm_call32(
-	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-	int *vbox_status);
+	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+	u32 parm_count, int *vbox_status);
 
 #endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 6e2a9619192d..6e8c0f1c1056 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -5,6 +5,7 @@
  * Copyright (C) 2006-2016 Oracle Corporation
  */
 
+#include <linux/cred.h>
 #include <linux/input.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
 /** Global vbg_gdev pointer used by vbg_get/put_gdev. */
 static struct vbg_dev *vbg_gdev;
 
+static u32 vbg_misc_device_requestor(struct inode *inode)
+{
+	u32 requestor = VMMDEV_REQUESTOR_USERMODE |
+			VMMDEV_REQUESTOR_CON_DONT_KNOW |
+			VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+
+	if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+		requestor |= VMMDEV_REQUESTOR_USR_ROOT;
+	else
+		requestor |= VMMDEV_REQUESTOR_USR_USER;
+
+	if (in_egroup_p(inode->i_gid))
+		requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
+
+	return requestor;
+}
+
 static int vbg_misc_device_open(struct inode *inode, struct file *filp)
 {
 	struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
 	/* misc_open sets filp->private_data to our misc device */
 	gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
 
-	session = vbg_core_open_session(gdev, false);
+	session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
 	if (IS_ERR(session))
 		return PTR_ERR(session);
 
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
 	gdev = container_of(filp->private_data, struct vbg_dev,
 			    misc_device_user);
 
-	session = vbg_core_open_session(gdev, false);
+	session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
+					      VMMDEV_REQUESTOR_USER_DEVICE);
 	if (IS_ERR(session))
 		return PTR_ERR(session);
 
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
 	       req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
 
 	if (is_vmmdev_req)
-		buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+		buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+				    session->requestor);
 	else
 		buf = kmalloc(size, GFP_KERNEL);
 	if (!buf)
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index bf4474214b4d..75fd140b02ff 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
 VBG_LOG(vbg_debug, pr_debug);
 #endif
 
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+		    u32 requestor)
 {
 	struct vmmdev_request_header *req;
 	int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
 	req->request_type = req_type;
 	req->rc = VERR_GENERAL_FAILURE;
 	req->reserved1 = 0;
-	req->reserved2 = 0;
+	req->requestor = requestor;
 
 	return req;
 }
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
 	return done;
 }
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
 		     struct vmmdev_hgcm_service_location *loc,
 		     u32 *client_id, int *vbox_status)
 {
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
 	int rc;
 
 	hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
-				     VMMDEVREQ_HGCM_CONNECT);
+				     VMMDEVREQ_HGCM_CONNECT, requestor);
 	if (!hgcm_connect)
 		return -ENOMEM;
 
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
 }
 EXPORT_SYMBOL(vbg_hgcm_connect);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+			u32 client_id, int *vbox_status)
 {
 	struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
 	int rc;
 
 	hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
-					VMMDEVREQ_HGCM_DISCONNECT);
+					VMMDEVREQ_HGCM_DISCONNECT,
+					requestor);
 	if (!hgcm_disconnect)
 		return -ENOMEM;
 
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
 	return 0;
 }
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-		  u32 parm_count, int *vbox_status)
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+		  u32 function, u32 timeout_ms,
+		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+		  int *vbox_status)
 {
 	struct vmmdev_hgcm_call *call;
 	void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
 		goto free_bounce_bufs;
 	}
 
-	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
 	if (!call) {
 		ret = -ENOMEM;
 		goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
 
 #ifdef CONFIG_COMPAT
 int vbg_hgcm_call32(
-	struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-	struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-	int *vbox_status)
+	struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+	u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+	u32 parm_count, int *vbox_status)
 {
 	struct vmmdev_hgcm_function_parameter *parm64 = NULL;
 	u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
 		goto out_free;
 	}
 
-	ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+	ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
 			    parm64, parm_count, vbox_status);
 	if (ret < 0)
 		goto out_free;
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
index 77f0c8f8a231..84834dad38d5 100644
--- a/drivers/virt/vboxguest/vboxguest_version.h
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -9,11 +9,10 @@
 #ifndef __VBOX_VERSION_H__
 #define __VBOX_VERSION_H__
 
-/* Last synced October 4th 2017 */
-#define VBG_VERSION_MAJOR 5
-#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_MAJOR 6
+#define VBG_VERSION_MINOR 0
 #define VBG_VERSION_BUILD 0
-#define VBG_SVN_REV 68940
-#define VBG_VERSION_STRING "5.2.0"
+#define VBG_SVN_REV 127566
+#define VBG_VERSION_STRING "6.0.0"
 
 #endif
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
index 5e2ae978935d..6337b8d75d96 100644
--- a/drivers/virt/vboxguest/vmmdev.h
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
 	s32 rc;
 	/** Reserved field no.1. MBZ. */
 	u32 reserved1;
-	/** Reserved field no.2. MBZ. */
-	u32 reserved2;
+	/** IN: Requestor information (VMMDEV_REQUESTOR_*) */
+	u32 requestor;
 };
 VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
 
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
 };
 VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
 
+#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO	BIT(0)
+
 /** struct vmmdev_guestinfo2 - Guest information report, version 2. */
 struct vmmdev_guest_info2 {
 	/** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
 	u32 additions_build;
 	/** SVN revision. */
 	u32 additions_revision;
-	/** Feature mask, currently unused. */
+	/** Feature mask. */
 	u32 additions_features;
 	/**
 	 * The intentional meaning of this field was:
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index a240ed2a0372..ff56c443180c 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
 #define vbg_debug pr_debug
 #endif
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
 		     struct vmmdev_hgcm_service_location *loc,
 		     u32 *client_id, int *vbox_status);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+			u32 client_id, int *vbox_status);
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-		  u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-		  u32 parm_count, int *vbox_status);
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+		  u32 function, u32 timeout_ms,
+		  struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+		  int *vbox_status);
 
 /**
  * Convert a VirtualBox status code to a standard Linux kernel return value.
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index 0e68024f36c7..26f39816af14 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
 #define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
 #endif
 
+/* vmmdev_request_header.requestor defines */
+
+/* Requestor user not given. */
+#define VMMDEV_REQUESTOR_USR_NOT_GIVEN			0x00000000
+/* The kernel driver (vboxguest) is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV			0x00000001
+/* Some other kernel driver is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV_OTHER			0x00000002
+/* The root or a admin user is the requestor. */
+#define VMMDEV_REQUESTOR_USR_ROOT			0x00000003
+/* Regular joe user is making the request. */
+#define VMMDEV_REQUESTOR_USR_USER			0x00000006
+/* User classification mask. */
+#define VMMDEV_REQUESTOR_USR_MASK			0x00000007
+
+/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
+#define VMMDEV_REQUESTOR_KERNEL				0x00000000
+/* User mode request. */
+#define VMMDEV_REQUESTOR_USERMODE			0x00000008
+/* User or kernel mode classification mask. */
+#define VMMDEV_REQUESTOR_MODE_MASK			0x00000008
+
+/* Don't know the physical console association of the requestor. */
+#define VMMDEV_REQUESTOR_CON_DONT_KNOW			0x00000000
+/*
+ * The request originates with a process that is NOT associated with the
+ * physical console.
+ */
+#define VMMDEV_REQUESTOR_CON_NO				0x00000010
+/* Requestor process is associated with the physical console. */
+#define VMMDEV_REQUESTOR_CON_YES			0x00000020
+/* Console classification mask. */
+#define VMMDEV_REQUESTOR_CON_MASK			0x00000030
+
+/* Requestor is member of special VirtualBox user group. */
+#define VMMDEV_REQUESTOR_GRP_VBOX			0x00000080
+
+/* Note: trust level is for windows guests only, linux always uses not-given */
+/* Requestor trust level: Unspecified */
+#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN		0x00000000
+/* Requestor trust level: Untrusted (SID S-1-16-0) */
+#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED		0x00001000
+/* Requestor trust level: Untrusted (SID S-1-16-4096) */
+#define VMMDEV_REQUESTOR_TRUST_LOW			0x00002000
+/* Requestor trust level: Medium (SID S-1-16-8192) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM			0x00003000
+/* Requestor trust level: Medium plus (SID S-1-16-8448) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS		0x00004000
+/* Requestor trust level: High (SID S-1-16-12288) */
+#define VMMDEV_REQUESTOR_TRUST_HIGH			0x00005000
+/* Requestor trust level: System (SID S-1-16-16384) */
+#define VMMDEV_REQUESTOR_TRUST_SYSTEM			0x00006000
+/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
+#define VMMDEV_REQUESTOR_TRUST_PROTECTED		0x00007000
+/* Requestor trust level mask */
+#define VMMDEV_REQUESTOR_TRUST_MASK			0x00007000
+
+/* Requestor is using the less trusted user device node (/dev/vboxuser) */
+#define VMMDEV_REQUESTOR_USER_DEVICE			0x00008000
+
 /** HGCM service location types. */
 enum vmmdev_hgcm_service_location_type {
 	VMMDEV_HGCM_LOC_INVALID = 0,
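A standalone sketch of decoding a requestor word with the masks added above. The constants are copied from the uapi header so the example compiles on its own; real user-space code would include <linux/vbox_vmmdev_types.h> instead.

	#include <stdint.h>
	#include <stdio.h>

	#define VMMDEV_REQUESTOR_USR_MASK	0x00000007
	#define VMMDEV_REQUESTOR_USR_USER	0x00000006
	#define VMMDEV_REQUESTOR_USERMODE	0x00000008
	#define VMMDEV_REQUESTOR_GRP_VBOX	0x00000080
	#define VMMDEV_REQUESTOR_USER_DEVICE	0x00008000

	int main(void)
	{
		/* What /dev/vboxuser might yield for a regular user in the
		   vbox group (see vbg_misc_device_requestor() above). */
		uint32_t req = VMMDEV_REQUESTOR_USERMODE |
			       VMMDEV_REQUESTOR_USR_USER |
			       VMMDEV_REQUESTOR_GRP_VBOX |
			       VMMDEV_REQUESTOR_USER_DEVICE;

		printf("user class %u, usermode %d, vbox group %d, vboxuser node %d\n",
		       req & VMMDEV_REQUESTOR_USR_MASK,
		       !!(req & VMMDEV_REQUESTOR_USERMODE),
		       !!(req & VMMDEV_REQUESTOR_GRP_VBOX),
		       !!(req & VMMDEV_REQUESTOR_USER_DEVICE));
		return 0;
	}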