aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-07-22 11:53:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-07-22 11:53:24 -0400
commitbcb53e5769227813b7878df1ec9d329b0bd68f74 (patch)
tree96b374131cf6bc4bae12dfaea97bea707200c341
parent82abbea734d659b4218ad06734b4927b43261985 (diff)
parent5a1d4c5dd4eb2f1f8a9b30e61762f3b3b564df70 (diff)
Merge tag 'staging-4.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
Pull staging driver fixes from Greg KH: "Here are some small staging driver fixes for reported issues for 4.13-rc2. Also in here is a new driver, the virtualbox DRM driver. It's stand-alone and got acks from the DRM developers to go in through this tree. It's a new thing, but it should be fine for this point in the rc cycle due to it being independent. All of this has been in linux-next for a while with no reported issues" * tag 'staging-4.13-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: staging: rtl8188eu: add TL-WN722N v2 support staging: speakup: safely register and unregister ldisc staging: speakup: add functions to register and unregister ldisc staging: speakup: safely close tty staging: sm750fb: avoid conflicting vesafb staging: lustre: ko2iblnd: check copy_from_iter/copy_to_iter return code staging: vboxvideo: Add vboxvideo to drivers/staging staging: sm750fb: fixed a assignment typo staging: rtl8188eu: memory leak in rtw_free_cmd_obj() staging: vchiq_arm: fix error codes in probe staging: comedi: ni_mio_common: fix AO timer off-by-one regression
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c3
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c19
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c2
-rw-r--r--drivers/staging/sm750fb/sm750.c24
-rw-r--r--drivers/staging/speakup/main.c2
-rw-r--r--drivers/staging/speakup/spk_priv.h2
-rw-r--r--drivers/staging/speakup/spk_ttyio.c22
-rw-r--r--drivers/staging/vboxvideo/Kconfig12
-rw-r--r--drivers/staging/vboxvideo/Makefile7
-rw-r--r--drivers/staging/vboxvideo/TODO9
-rw-r--r--drivers/staging/vboxvideo/hgsmi_base.c246
-rw-r--r--drivers/staging/vboxvideo/hgsmi_ch_setup.h66
-rw-r--r--drivers/staging/vboxvideo/hgsmi_channels.h53
-rw-r--r--drivers/staging/vboxvideo/hgsmi_defs.h92
-rw-r--r--drivers/staging/vboxvideo/modesetting.c142
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.c286
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.h296
-rw-r--r--drivers/staging/vboxvideo/vbox_err.h50
-rw-r--r--drivers/staging/vboxvideo/vbox_fb.c412
-rw-r--r--drivers/staging/vboxvideo/vbox_hgsmi.c115
-rw-r--r--drivers/staging/vboxvideo/vbox_irq.c197
-rw-r--r--drivers/staging/vboxvideo/vbox_main.c534
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c877
-rw-r--r--drivers/staging/vboxvideo/vbox_prime.c74
-rw-r--r--drivers/staging/vboxvideo/vbox_ttm.c472
-rw-r--r--drivers/staging/vboxvideo/vboxvideo.h491
-rw-r--r--drivers/staging/vboxvideo/vboxvideo_guest.h95
-rw-r--r--drivers/staging/vboxvideo/vboxvideo_vbe.h84
-rw-r--r--drivers/staging/vboxvideo/vbva_base.c233
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c10
34 files changed, 4912 insertions, 21 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 268d4e6ef48a..ef28a1cb64ae 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -110,4 +110,6 @@ source "drivers/staging/ccree/Kconfig"
110 110
111source "drivers/staging/typec/Kconfig" 111source "drivers/staging/typec/Kconfig"
112 112
113source "drivers/staging/vboxvideo/Kconfig"
114
113endif # STAGING 115endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index b93e6f5f0f6e..2918580bdb9e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -44,3 +44,4 @@ obj-$(CONFIG_KS7010) += ks7010/
44obj-$(CONFIG_GREYBUS) += greybus/ 44obj-$(CONFIG_GREYBUS) += greybus/
45obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/ 45obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
46obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/ 46obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/
47obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index b2e382888981..2f7bfc1c59e5 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3116,8 +3116,7 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
3116 /* following line: 2-1 per STC */ 3116 /* following line: 2-1 per STC */
3117 ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG); 3117 ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
3118 ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG); 3118 ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
3119 /* following line: N-1 per STC */ 3119 ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG);
3120 ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
3121 } else { /* TRIG_EXT */ 3120 } else { /* TRIG_EXT */
3122 /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */ 3121 /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
3123 devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA; 3122 devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 85b242ec5f9b..8fc191d99927 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1640,8 +1640,13 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
1640 ibmsg = tx->tx_msg; 1640 ibmsg = tx->tx_msg;
1641 ibmsg->ibm_u.immediate.ibim_hdr = *hdr; 1641 ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
1642 1642
1643 copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE, 1643 rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob,
1644 &from); 1644 &from);
1645 if (rc != payload_nob) {
1646 kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
1647 return -EFAULT;
1648 }
1649
1645 nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]); 1650 nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
1646 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); 1651 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
1647 1652
@@ -1741,8 +1746,14 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
1741 break; 1746 break;
1742 } 1747 }
1743 1748
1744 copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, 1749 rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen,
1745 IBLND_MSG_SIZE, to); 1750 to);
1751 if (rc != rlen) {
1752 rc = -EFAULT;
1753 break;
1754 }
1755
1756 rc = 0;
1746 lnet_finalize(ni, lntmsg, 0); 1757 lnet_finalize(ni, lntmsg, 0);
1747 break; 1758 break;
1748 1759
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 002d09159896..a69007ef77bf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -132,7 +132,7 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
132 kfree(pcmd->parmbuf); 132 kfree(pcmd->parmbuf);
133 } 133 }
134 134
135 if (!pcmd->rsp) { 135 if (pcmd->rsp) {
136 if (pcmd->rspsz != 0) { 136 if (pcmd->rspsz != 0) {
137 /* free rsp in cmd_obj */ 137 /* free rsp in cmd_obj */
138 kfree(pcmd->rsp); 138 kfree(pcmd->rsp);
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 963235fd7292..d283341cfe43 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -43,6 +43,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
43 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 43 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
44 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ 44 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
45 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ 45 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
46 {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
46 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ 47 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
47 {} /* Terminating entry */ 48 {} /* Terminating entry */
48}; 49};
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 944dd25924be..4754f7a20684 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -40,7 +40,7 @@ static unsigned int get_mxclk_freq(void)
40 40
41 pll_reg = peek32(MXCLK_PLL_CTRL); 41 pll_reg = peek32(MXCLK_PLL_CTRL);
42 M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT; 42 M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT;
43 N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_M_SHIFT; 43 N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_N_SHIFT;
44 OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT; 44 OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT;
45 POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT; 45 POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT;
46 46
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index 3aa4128703d5..67207b0554cd 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1053,6 +1053,26 @@ release_fb:
1053 return err; 1053 return err;
1054} 1054}
1055 1055
1056static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
1057{
1058 struct apertures_struct *ap;
1059 bool primary = false;
1060
1061 ap = alloc_apertures(1);
1062 if (!ap)
1063 return -ENOMEM;
1064
1065 ap->ranges[0].base = pci_resource_start(pdev, 0);
1066 ap->ranges[0].size = pci_resource_len(pdev, 0);
1067#ifdef CONFIG_X86
1068 primary = pdev->resource[PCI_ROM_RESOURCE].flags &
1069 IORESOURCE_ROM_SHADOW;
1070#endif
1071 remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
1072 kfree(ap);
1073 return 0;
1074}
1075
1056static int lynxfb_pci_probe(struct pci_dev *pdev, 1076static int lynxfb_pci_probe(struct pci_dev *pdev,
1057 const struct pci_device_id *ent) 1077 const struct pci_device_id *ent)
1058{ 1078{
@@ -1061,6 +1081,10 @@ static int lynxfb_pci_probe(struct pci_dev *pdev,
1061 int fbidx; 1081 int fbidx;
1062 int err; 1082 int err;
1063 1083
1084 err = lynxfb_kick_out_firmware_fb(pdev);
1085 if (err)
1086 return err;
1087
1064 /* enable device */ 1088 /* enable device */
1065 err = pcim_enable_device(pdev); 1089 err = pcim_enable_device(pdev);
1066 if (err) 1090 if (err)
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 82e5de248947..67956e24779c 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2314,6 +2314,7 @@ static void __exit speakup_exit(void)
2314 mutex_lock(&spk_mutex); 2314 mutex_lock(&spk_mutex);
2315 synth_release(); 2315 synth_release();
2316 mutex_unlock(&spk_mutex); 2316 mutex_unlock(&spk_mutex);
2317 spk_ttyio_unregister_ldisc();
2317 2318
2318 speakup_kobj_exit(); 2319 speakup_kobj_exit();
2319 2320
@@ -2376,6 +2377,7 @@ static int __init speakup_init(void)
2376 if (err) 2377 if (err)
2377 goto error_kobjects; 2378 goto error_kobjects;
2378 2379
2380 spk_ttyio_register_ldisc();
2379 synth_init(synth_name); 2381 synth_init(synth_name);
2380 speakup_register_devsynth(); 2382 speakup_register_devsynth();
2381 /* 2383 /*
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index 87b6a0a4c54d..046040ac074c 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -48,6 +48,8 @@ void spk_stop_serial_interrupt(void);
48int spk_wait_for_xmitr(struct spk_synth *in_synth); 48int spk_wait_for_xmitr(struct spk_synth *in_synth);
49void spk_serial_release(void); 49void spk_serial_release(void);
50void spk_ttyio_release(void); 50void spk_ttyio_release(void);
51void spk_ttyio_register_ldisc(void);
52void spk_ttyio_unregister_ldisc(void);
51 53
52void synth_buffer_skip_nonlatin1(void); 54void synth_buffer_skip_nonlatin1(void);
53u16 synth_buffer_getc(void); 55u16 synth_buffer_getc(void);
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index ed8e96b06ead..fe340b07c482 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -154,12 +154,6 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
154 struct ktermios tmp_termios; 154 struct ktermios tmp_termios;
155 dev_t dev; 155 dev_t dev;
156 156
157 ret = tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops);
158 if (ret) {
159 pr_err("Error registering line discipline.\n");
160 return ret;
161 }
162
163 ret = get_dev_to_use(synth, &dev); 157 ret = get_dev_to_use(synth, &dev);
164 if (ret) 158 if (ret)
165 return ret; 159 return ret;
@@ -196,10 +190,24 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth)
196 tty_unlock(tty); 190 tty_unlock(tty);
197 191
198 ret = tty_set_ldisc(tty, N_SPEAKUP); 192 ret = tty_set_ldisc(tty, N_SPEAKUP);
193 if (ret)
194 pr_err("speakup: Failed to set N_SPEAKUP on tty\n");
199 195
200 return ret; 196 return ret;
201} 197}
202 198
199void spk_ttyio_register_ldisc(void)
200{
201 if (tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops))
202 pr_warn("speakup: Error registering line discipline. Most synths won't work.\n");
203}
204
205void spk_ttyio_unregister_ldisc(void)
206{
207 if (tty_unregister_ldisc(N_SPEAKUP))
208 pr_warn("speakup: Couldn't unregister ldisc\n");
209}
210
203static int spk_ttyio_out(struct spk_synth *in_synth, const char ch) 211static int spk_ttyio_out(struct spk_synth *in_synth, const char ch)
204{ 212{
205 if (in_synth->alive && speakup_tty && speakup_tty->ops->write) { 213 if (in_synth->alive && speakup_tty && speakup_tty->ops->write) {
@@ -300,7 +308,7 @@ void spk_ttyio_release(void)
300 308
301 tty_ldisc_flush(speakup_tty); 309 tty_ldisc_flush(speakup_tty);
302 tty_unlock(speakup_tty); 310 tty_unlock(speakup_tty);
303 tty_ldisc_release(speakup_tty); 311 tty_release_struct(speakup_tty, speakup_tty->index);
304} 312}
305EXPORT_SYMBOL_GPL(spk_ttyio_release); 313EXPORT_SYMBOL_GPL(spk_ttyio_release);
306 314
diff --git a/drivers/staging/vboxvideo/Kconfig b/drivers/staging/vboxvideo/Kconfig
new file mode 100644
index 000000000000..a52746f9a670
--- /dev/null
+++ b/drivers/staging/vboxvideo/Kconfig
@@ -0,0 +1,12 @@
1config DRM_VBOXVIDEO
2 tristate "Virtual Box Graphics Card"
3 depends on DRM && X86 && PCI
4 select DRM_KMS_HELPER
5 help
6 This is a KMS driver for the virtual Graphics Card used in
7 Virtual Box virtual machines.
8
9 Although it is possible to build this driver into the kernel, it is advised
10 to build this driver as a module, so that it can be updated
11 independently of the kernel. Select M to build this driver as a
12 module and add support for these devices via drm/kms interfaces.
diff --git a/drivers/staging/vboxvideo/Makefile b/drivers/staging/vboxvideo/Makefile
new file mode 100644
index 000000000000..2d0b3bc7ad73
--- /dev/null
+++ b/drivers/staging/vboxvideo/Makefile
@@ -0,0 +1,7 @@
1ccflags-y := -Iinclude/drm
2
3vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
4 vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
5 vbox_mode.o vbox_prime.o vbox_ttm.o
6
7obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/staging/vboxvideo/TODO b/drivers/staging/vboxvideo/TODO
new file mode 100644
index 000000000000..ce764309b079
--- /dev/null
+++ b/drivers/staging/vboxvideo/TODO
@@ -0,0 +1,9 @@
1TODO:
2-Move the driver over to the atomic API
3-Stop using old load / unload drm_driver hooks
4-Get a full review from the drm-maintainers on dri-devel done on this driver
5-Extend this TODO with the results of that review
6
7Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
8Hans de Goede <hdegoede@redhat.com> and
9Michael Thayer <michael.thayer@oracle.com>.
diff --git a/drivers/staging/vboxvideo/hgsmi_base.c b/drivers/staging/vboxvideo/hgsmi_base.c
new file mode 100644
index 000000000000..15ff5f42e2cd
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_base.c
@@ -0,0 +1,246 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "vbox_drv.h"
24#include "vbox_err.h"
25#include "vboxvideo_guest.h"
26#include "vboxvideo_vbe.h"
27#include "hgsmi_channels.h"
28#include "hgsmi_ch_setup.h"
29
30/**
31 * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
32 * @param ctx the context of the guest heap to use.
33 * @param location the offset chosen for the flags within guest VRAM.
34 * @returns 0 on success, -errno on failure
35 */
36int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
37{
38 struct hgsmi_buffer_location *p;
39
40 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
41 HGSMI_CC_HOST_FLAGS_LOCATION);
42 if (!p)
43 return -ENOMEM;
44
45 p->buf_location = location;
46 p->buf_len = sizeof(struct hgsmi_host_flags);
47
48 hgsmi_buffer_submit(ctx, p);
49 hgsmi_buffer_free(ctx, p);
50
51 return 0;
52}
53
54/**
55 * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
56 * @param ctx the context of the guest heap to use.
57 * @param caps the capabilities to report, see vbva_caps.
58 * @returns 0 on success, -errno on failure
59 */
60int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps)
61{
62 struct vbva_caps *p;
63
64 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
65 if (!p)
66 return -ENOMEM;
67
68 p->rc = VERR_NOT_IMPLEMENTED;
69 p->caps = caps;
70
71 hgsmi_buffer_submit(ctx, p);
72
73 WARN_ON_ONCE(RT_FAILURE(p->rc));
74
75 hgsmi_buffer_free(ctx, p);
76
77 return 0;
78}
79
80int hgsmi_test_query_conf(struct gen_pool *ctx)
81{
82 u32 value = 0;
83 int ret;
84
85 ret = hgsmi_query_conf(ctx, U32_MAX, &value);
86 if (ret)
87 return ret;
88
89 return value == U32_MAX ? 0 : -EIO;
90}
91
92/**
93 * Query the host for an HGSMI configuration parameter via an HGSMI command.
94 * @param ctx the context containing the heap used
95 * @param index the index of the parameter to query,
96 * @see vbva_conf32::index
97 * @param value_ret where to store the value of the parameter on success
98 * @returns 0 on success, -errno on failure
99 */
100int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
101{
102 struct vbva_conf32 *p;
103
104 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
105 VBVA_QUERY_CONF32);
106 if (!p)
107 return -ENOMEM;
108
109 p->index = index;
110 p->value = U32_MAX;
111
112 hgsmi_buffer_submit(ctx, p);
113
114 *value_ret = p->value;
115
116 hgsmi_buffer_free(ctx, p);
117
118 return 0;
119}
120
121/**
122 * Pass the host a new mouse pointer shape via an HGSMI command.
123 *
124 * @param ctx the context containing the heap to be used
125 * @param flags cursor flags, @see VMMDevReqMousePointer::flags
126 * @param hot_x horizontal position of the hot spot
127 * @param hot_y vertical position of the hot spot
128 * @param width width in pixels of the cursor
129 * @param height height in pixels of the cursor
130 * @param pixels pixel data, @see VMMDevReqMousePointer for the format
131 * @param len size in bytes of the pixel data
132 * @returns 0 on success, -errno on failure
133 */
134int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
135 u32 hot_x, u32 hot_y, u32 width, u32 height,
136 u8 *pixels, u32 len)
137{
138 struct vbva_mouse_pointer_shape *p;
139 u32 pixel_len = 0;
140 int rc;
141
142 if (flags & VBOX_MOUSE_POINTER_SHAPE) {
143 /*
144 * Size of the pointer data:
145 * sizeof (AND mask) + sizeof (XOR_MASK)
146 */
147 pixel_len = ((((width + 7) / 8) * height + 3) & ~3) +
148 width * 4 * height;
149 if (pixel_len > len)
150 return -EINVAL;
151
152 /*
153 * If shape is supplied, then always create the pointer visible.
154 * See comments in 'vboxUpdatePointerShape'
155 */
156 flags |= VBOX_MOUSE_POINTER_VISIBLE;
157 }
158
159 p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
160 VBVA_MOUSE_POINTER_SHAPE);
161 if (!p)
162 return -ENOMEM;
163
164 p->result = VINF_SUCCESS;
165 p->flags = flags;
166 p->hot_X = hot_x;
167 p->hot_y = hot_y;
168 p->width = width;
169 p->height = height;
170 if (pixel_len)
171 memcpy(p->data, pixels, pixel_len);
172
173 hgsmi_buffer_submit(ctx, p);
174
175 switch (p->result) {
176 case VINF_SUCCESS:
177 rc = 0;
178 break;
179 case VERR_NO_MEMORY:
180 rc = -ENOMEM;
181 break;
182 case VERR_NOT_SUPPORTED:
183 rc = -EBUSY;
184 break;
185 default:
186 rc = -EINVAL;
187 }
188
189 hgsmi_buffer_free(ctx, p);
190
191 return rc;
192}
193
194/**
195 * Report the guest cursor position. The host may wish to use this information
196 * to re-position its own cursor (though this is currently unlikely). The
197 * current host cursor position is returned.
198 * @param ctx The context containing the heap used.
199 * @param report_position Are we reporting a position?
200 * @param x Guest cursor X position.
201 * @param y Guest cursor Y position.
202 * @param x_host Host cursor X position is stored here. Optional.
203 * @param y_host Host cursor Y position is stored here. Optional.
204 * @returns 0 on success, -errno on failure
205 */
206int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
207 u32 x, u32 y, u32 *x_host, u32 *y_host)
208{
209 struct vbva_cursor_position *p;
210
211 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
212 VBVA_CURSOR_POSITION);
213 if (!p)
214 return -ENOMEM;
215
216 p->report_position = report_position;
217 p->x = x;
218 p->y = y;
219
220 hgsmi_buffer_submit(ctx, p);
221
222 *x_host = p->x;
223 *y_host = p->y;
224
225 hgsmi_buffer_free(ctx, p);
226
227 return 0;
228}
229
230/**
231 * @todo Mouse pointer position to be read from VMMDev memory, address of the
232 * memory region can be queried from VMMDev via an IOCTL. This VMMDev memory
233 * region will contain host information which is needed by the guest.
234 *
235 * Reading will not cause a switch to the host.
236 *
237 * Have to take into account:
238 * * synchronization: host must write to the memory only from EMT,
239 * large structures must be read under flag, which tells the host
240 * that the guest is currently reading the memory (OWNER flag?).
241 * * guest writes: may be allocate a page for the host info and make
242 * the page readonly for the guest.
243 * * the information should be available only for additions drivers.
244 * * VMMDev additions driver will inform the host which version of the info
245 * it expects, host must support all versions.
246 */
diff --git a/drivers/staging/vboxvideo/hgsmi_ch_setup.h b/drivers/staging/vboxvideo/hgsmi_ch_setup.h
new file mode 100644
index 000000000000..8e6d9e11a69c
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_ch_setup.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __HGSMI_CH_SETUP_H__
24#define __HGSMI_CH_SETUP_H__
25
26/*
27 * Tell the host the location of hgsmi_host_flags structure, where the host
28 * can write information about pending buffers, etc, and which can be quickly
29 * polled by the guest without a need to port IO.
30 */
31#define HGSMI_CC_HOST_FLAGS_LOCATION 0
32
33struct hgsmi_buffer_location {
34 u32 buf_location;
35 u32 buf_len;
36} __packed;
37
38/* HGSMI setup and configuration data structures. */
39/* host->guest commands pending, should be accessed under FIFO lock only */
40#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x01u
41/* IRQ is fired, should be accessed under VGAState::lock only */
42#define HGSMIHOSTFLAGS_IRQ 0x02u
43/* vsync interrupt flag, should be accessed under VGAState::lock only */
44#define HGSMIHOSTFLAGS_VSYNC 0x10u
45/** monitor hotplug flag, should be accessed under VGAState::lock only */
46#define HGSMIHOSTFLAGS_HOTPLUG 0x20u
47/**
48 * Cursor capability state change flag, should be accessed under
49 * VGAState::lock only. @see vbva_conf32.
50 */
51#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u
52
53struct hgsmi_host_flags {
54 /*
55 * Host flags can be accessed and modified in multiple threads
56 * concurrently, e.g. CrOpenGL HGCM and GUI threads when completing
57 * HGSMI 3D and Video Accel respectively, EMT thread when dealing with
58 * HGSMI command processing, etc.
59 * Besides settings/cleaning flags atomically, some flags have their
60 * own special sync restrictions, see comments for flags above.
61 */
62 u32 host_flags;
63 u32 reserved[3];
64} __packed;
65
66#endif
diff --git a/drivers/staging/vboxvideo/hgsmi_channels.h b/drivers/staging/vboxvideo/hgsmi_channels.h
new file mode 100644
index 000000000000..a2a34b2167b4
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_channels.h
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __HGSMI_CHANNELS_H__
24#define __HGSMI_CHANNELS_H__
25
26/*
27 * Each channel has an 8 bit identifier. There are a number of predefined
28 * (hardcoded) channels.
29 *
30 * HGSMI_CH_HGSMI channel can be used to map a string channel identifier
31 * to a free 16 bit numerical value. values are allocated in range
32 * [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
33 */
34
35/* A reserved channel value */
36#define HGSMI_CH_RESERVED 0x00
37/* HGCMI: setup and configuration */
38#define HGSMI_CH_HGSMI 0x01
39/* Graphics: VBVA */
40#define HGSMI_CH_VBVA 0x02
41/* Graphics: Seamless with a single guest region */
42#define HGSMI_CH_SEAMLESS 0x03
43/* Graphics: Seamless with separate host windows */
44#define HGSMI_CH_SEAMLESS2 0x04
45/* Graphics: OpenGL HW acceleration */
46#define HGSMI_CH_OPENGL 0x05
47
48/* The first channel index to be used for string mappings (inclusive) */
49#define HGSMI_CH_STRING_FIRST 0x20
50/* The last channel index for string mappings (inclusive) */
51#define HGSMI_CH_STRING_LAST 0xff
52
53#endif
diff --git a/drivers/staging/vboxvideo/hgsmi_defs.h b/drivers/staging/vboxvideo/hgsmi_defs.h
new file mode 100644
index 000000000000..5b21fb974d20
--- /dev/null
+++ b/drivers/staging/vboxvideo/hgsmi_defs.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __HGSMI_DEFS_H__
24#define __HGSMI_DEFS_H__
25
26/* Buffer sequence type mask. */
27#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03
28/* Single buffer, not a part of a sequence. */
29#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00
30/* The first buffer in a sequence. */
31#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01
32/* A middle buffer in a sequence. */
33#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02
34/* The last buffer in a sequence. */
35#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03
36
37/* 16 bytes buffer header. */
38struct hgsmi_buffer_header {
39 u32 data_size; /* Size of data that follows the header. */
40 u8 flags; /* HGSMI_BUFFER_HEADER_F_* */
41 u8 channel; /* The channel the data must be routed to. */
42 u16 channel_info; /* Opaque to the HGSMI, used by the channel. */
43
44 union {
45 /* Opaque placeholder to make the union 8 bytes. */
46 u8 header_data[8];
47
48 /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */
49 struct {
50 u32 reserved1; /* A reserved field, initialize to 0. */
51 u32 reserved2; /* A reserved field, initialize to 0. */
52 } buffer;
53
54 /* HGSMI_BUFFER_HEADER_F_SEQ_START */
55 struct {
56 /* Must be the same for all buffers in the sequence. */
57 u32 sequence_number;
58 /* The total size of the sequence. */
59 u32 sequence_size;
60 } sequence_start;
61
62 /*
63 * HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and
64 * HGSMI_BUFFER_HEADER_F_SEQ_END
65 */
66 struct {
67 /* Must be the same for all buffers in the sequence. */
68 u32 sequence_number;
69 /* Data offset in the entire sequence. */
70 u32 sequence_offset;
71 } sequence_continue;
72 } u;
73} __packed;
74
75/* 8 bytes buffer tail. */
76struct hgsmi_buffer_tail {
77 /* Reserved, must be initialized to 0. */
78 u32 reserved;
79 /*
80 * One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html
81 * Over the header, offset and for first 4 bytes of the tail.
82 */
83 u32 checksum;
84} __packed;
85
86/*
87 * The size of the array of channels. Array indexes are u8.
88 * Note: the value must not be changed.
89 */
90#define HGSMI_NUMBER_OF_CHANNELS 0x100
91
92#endif
diff --git a/drivers/staging/vboxvideo/modesetting.c b/drivers/staging/vboxvideo/modesetting.c
new file mode 100644
index 000000000000..7616b8aab23a
--- /dev/null
+++ b/drivers/staging/vboxvideo/modesetting.c
@@ -0,0 +1,142 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "vbox_drv.h"
24#include "vbox_err.h"
25#include "vboxvideo_guest.h"
26#include "vboxvideo_vbe.h"
27#include "hgsmi_channels.h"
28
29/**
30 * Set a video mode via an HGSMI request. The views must have been
31 * initialised first using @a VBoxHGSMISendViewInfo and if the mode is being
32 * set on the first display then it must be set first using registers.
33 * @param ctx The context containing the heap to use
34 * @param display The screen number
35 * @param origin_x The horizontal displacement relative to the first scrn
36 * @param origin_y The vertical displacement relative to the first screen
37 * @param start_offset The offset of the visible area of the framebuffer
38 * relative to the framebuffer start
39 * @param pitch The offset in bytes between the starts of two adjecent
40 * scan lines in video RAM
41 * @param width The mode width
42 * @param height The mode height
43 * @param bpp The colour depth of the mode
44 * @param flags Flags
45 */
46void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
47 s32 origin_x, s32 origin_y, u32 start_offset,
48 u32 pitch, u32 width, u32 height,
49 u16 bpp, u16 flags)
50{
51 struct vbva_infoscreen *p;
52
53 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
54 VBVA_INFO_SCREEN);
55 if (!p)
56 return;
57
58 p->view_index = display;
59 p->origin_x = origin_x;
60 p->origin_y = origin_y;
61 p->start_offset = start_offset;
62 p->line_size = pitch;
63 p->width = width;
64 p->height = height;
65 p->bits_per_pixel = bpp;
66 p->flags = flags;
67
68 hgsmi_buffer_submit(ctx, p);
69 hgsmi_buffer_free(ctx, p);
70}
71
72/**
73 * Report the rectangle relative to which absolute pointer events should be
74 * expressed. This information remains valid until the next VBVA resize event
75 * for any screen, at which time it is reset to the bounding rectangle of all
76 * virtual screens.
77 * @param ctx The context containing the heap to use.
78 * @param origin_x Upper left X co-ordinate relative to the first screen.
79 * @param origin_y Upper left Y co-ordinate relative to the first screen.
80 * @param width Rectangle width.
81 * @param height Rectangle height.
82 * @returns 0 on success, -errno on failure
83 */
84int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
85 u32 width, u32 height)
86{
87 struct vbva_report_input_mapping *p;
88
89 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
90 VBVA_REPORT_INPUT_MAPPING);
91 if (!p)
92 return -ENOMEM;
93
94 p->x = origin_x;
95 p->y = origin_y;
96 p->cx = width;
97 p->cy = height;
98
99 hgsmi_buffer_submit(ctx, p);
100 hgsmi_buffer_free(ctx, p);
101
102 return 0;
103}
104
105/**
106 * Get most recent video mode hints.
107 * @param ctx The context containing the heap to use.
108 * @param screens The number of screens to query hints for, starting at 0.
109 * @param hints Array of vbva_modehint structures for receiving the hints.
110 * @returns 0 on success, -errno on failure
111 */
112int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
113 struct vbva_modehint *hints)
114{
115 struct vbva_query_mode_hints *p;
116 size_t size;
117
118 if (WARN_ON(!hints))
119 return -EINVAL;
120
121 size = screens * sizeof(struct vbva_modehint);
122 p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA,
123 VBVA_QUERY_MODE_HINTS);
124 if (!p)
125 return -ENOMEM;
126
127 p->hints_queried_count = screens;
128 p->hint_structure_guest_size = sizeof(struct vbva_modehint);
129 p->rc = VERR_NOT_SUPPORTED;
130
131 hgsmi_buffer_submit(ctx, p);
132
133 if (RT_FAILURE(p->rc)) {
134 hgsmi_buffer_free(ctx, p);
135 return -EIO;
136 }
137
138 memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size);
139 hgsmi_buffer_free(ctx, p);
140
141 return 0;
142}
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
new file mode 100644
index 000000000000..92ae1560a16d
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -0,0 +1,286 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_drv.c
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 * Authors: Dave Airlie <airlied@redhat.com>
 * Michael Thayer <michael.thayer@oracle.com>,
28 * Hans de Goede <hdegoede@redhat.com>
29 */
30#include <linux/module.h>
31#include <linux/console.h>
32#include <linux/vt_kern.h>
33
34#include <drm/drmP.h>
35#include <drm/drm_crtc_helper.h>
36
37#include "vbox_drv.h"
38
/* -1 = auto (default), 0 = force modesetting off, 1 = force on; see vbox_init(). */
int vbox_modeset = -1;

MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, vbox_modeset, int, 0400);

/* Defined at the bottom of this file; needed up here by vbox_pci_probe(). */
static struct drm_driver driver;

/* VirtualBox graphics adapter: vendor 0x80ee (innotek), device 0xbeef. */
static const struct pci_device_id pciidlist[] = {
	{ 0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, 0, 0},
};
MODULE_DEVICE_TABLE(pci, pciidlist);
51
/* PCI probe: create and register a DRM device for the VirtualBox adapter. */
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}
56
/* PCI remove: tear down the DRM device registered in vbox_pci_probe(). */
static void vbox_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
63
/*
 * Quiesce the device for suspend/hibernate: stop connector polling, save
 * PCI config space and put the fbdev console to sleep.  Always returns 0.
 */
static int vbox_drm_freeze(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, true);

	return 0;
}
76
/*
 * Undo vbox_drm_freeze(): restore the mode configuration and wake the
 * fbdev console.  Always returns 0.
 */
static int vbox_drm_thaw(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;

	drm_mode_config_reset(dev);
	drm_helper_resume_force_mode(dev);
	drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, false);

	return 0;
}
87
88static int vbox_drm_resume(struct drm_device *dev)
89{
90 int ret;
91
92 if (pci_enable_device(dev->pdev))
93 return -EIO;
94
95 ret = vbox_drm_thaw(dev);
96 if (ret)
97 return ret;
98
99 drm_kms_helper_poll_enable(dev);
100
101 return 0;
102}
103
104static int vbox_pm_suspend(struct device *dev)
105{
106 struct pci_dev *pdev = to_pci_dev(dev);
107 struct drm_device *ddev = pci_get_drvdata(pdev);
108 int error;
109
110 error = vbox_drm_freeze(ddev);
111 if (error)
112 return error;
113
114 pci_disable_device(pdev);
115 pci_set_power_state(pdev, PCI_D3hot);
116
117 return 0;
118}
119
/* dev_pm_ops .resume/.restore: bring the device fully back up. */
static int vbox_pm_resume(struct device *dev)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));

	return vbox_drm_resume(ddev);
}
126
127static int vbox_pm_freeze(struct device *dev)
128{
129 struct pci_dev *pdev = to_pci_dev(dev);
130 struct drm_device *ddev = pci_get_drvdata(pdev);
131
132 if (!ddev || !ddev->dev_private)
133 return -ENODEV;
134
135 return vbox_drm_freeze(ddev);
136}
137
/* dev_pm_ops .thaw (hibernation image written, or write failed): unfreeze. */
static int vbox_pm_thaw(struct device *dev)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));

	return vbox_drm_thaw(ddev);
}
144
/* dev_pm_ops .poweroff (hibernation, final power-down): freeze the device. */
static int vbox_pm_poweroff(struct device *dev)
{
	struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));

	return vbox_drm_freeze(ddev);
}
151
/* Power-management callbacks; .restore reuses the full resume path. */
static const struct dev_pm_ops vbox_pm_ops = {
	.suspend = vbox_pm_suspend,
	.resume = vbox_pm_resume,
	.freeze = vbox_pm_freeze,
	.thaw = vbox_pm_thaw,
	.poweroff = vbox_pm_poweroff,
	.restore = vbox_pm_resume,
};
160
/* PCI driver glue, registered via drm_pci_init() in vbox_init(). */
static struct pci_driver vbox_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = vbox_pci_probe,
	.remove = vbox_pci_remove,
	.driver.pm = &vbox_pm_ops,
};
168
/* Character-device file operations; all stock DRM helpers except mmap. */
static const struct file_operations vbox_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = vbox_mmap,	/* TTM-backed mmap, see vbox_ttm.c */
	.poll = drm_poll,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.read = drm_read,
};
181
/*
 * Called when a process becomes DRM master (including on open).
 * Resets the hotplug handshake state and disables VBVA acceleration
 * until the new master proves it can handle dirty-rectangle reporting.
 * Always returns 0.
 */
static int vbox_master_set(struct drm_device *dev,
			   struct drm_file *file_priv, bool from_open)
{
	struct vbox_private *vbox = dev->dev_private;

	/*
	 * We do not yet know whether the new owner can handle hotplug, so we
	 * do not advertise dynamic modes on the first query and send a
	 * tentative hotplug notification after that to see if they query again.
	 */
	vbox->initial_mode_queried = false;

	mutex_lock(&vbox->hw_mutex);
	/*
	 * Disable VBVA when someone releases master in case the next person
	 * tries to do VESA.
	 */
	/** @todo work out if anyone is likely to and whether it will work. */
	/*
	 * Update: we also disable it because if the new master does not do
	 * dirty rectangle reporting (e.g. old versions of Plymouth) then at
	 * least the first screen will still be updated. We enable it as soon
	 * as we receive a dirty rectangle report.
	 */
	vbox_disable_accel(vbox);
	mutex_unlock(&vbox->hw_mutex);

	return 0;
}
211
/*
 * Called when the current DRM master drops its status.  Mirrors
 * vbox_master_set(): reset the hotplug handshake and disable VBVA.
 */
static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vbox_private *vbox = dev->dev_private;

	/* See vbox_master_set() */
	vbox->initial_mode_queried = false;

	mutex_lock(&vbox->hw_mutex);
	vbox_disable_accel(vbox);
	mutex_unlock(&vbox->hw_mutex);
}
223
/* Main DRM driver description; declared earlier so probe() can use it. */
static struct drm_driver driver = {
	.driver_features =
	    DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	    DRIVER_PRIME,
	.dev_priv_size = 0,

	.load = vbox_driver_load,
	.unload = vbox_driver_unload,
	.lastclose = vbox_driver_lastclose,
	.master_set = vbox_master_set,
	.master_drop = vbox_master_drop,
	.set_busid = drm_pci_set_busid,

	.fops = &vbox_fops,
	.irq_handler = vbox_irq_handler,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,

	/* GEM / dumb-buffer support (TTM backed, see vbox_main.c). */
	.gem_free_object = vbox_gem_free_object,
	.dumb_create = vbox_dumb_create,
	.dumb_map_offset = vbox_dumb_mmap_offset,
	.dumb_destroy = drm_gem_dumb_destroy,
	/* PRIME buffer sharing; export/import use the generic helpers. */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_pin = vbox_gem_prime_pin,
	.gem_prime_unpin = vbox_gem_prime_unpin,
	.gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
	.gem_prime_vmap = vbox_gem_prime_vmap,
	.gem_prime_vunmap = vbox_gem_prime_vunmap,
	.gem_prime_mmap = vbox_gem_prime_mmap,
};
262
/*
 * Module init: honour "nomodeset"/modeset=0 and register the PCI driver.
 */
static int __init vbox_init(void)
{
#ifdef CONFIG_VGA_CONSOLE
	/*
	 * Respect "nomodeset" on the kernel command line unless the user
	 * overrode it explicitly with vboxvideo.modeset=0/1.
	 */
	if (vgacon_text_force() && vbox_modeset == -1)
		return -EINVAL;
#endif

	/* Explicitly disabled via the module parameter. */
	if (vbox_modeset == 0)
		return -EINVAL;

	return drm_pci_init(&driver, &vbox_pci_driver);
}
275
/* Module exit: unregister the PCI driver registered in vbox_init(). */
static void __exit vbox_exit(void)
{
	drm_pci_exit(&driver, &vbox_pci_driver);
}
280
281module_init(vbox_init);
282module_exit(vbox_exit);
283
284MODULE_AUTHOR("Oracle Corporation");
285MODULE_DESCRIPTION(DRIVER_DESC);
286MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
new file mode 100644
index 000000000000..4b9302703b36
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -0,0 +1,296 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_drv.h
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 * Authors: Dave Airlie <airlied@redhat.com>
 * Michael Thayer <michael.thayer@oracle.com>,
28 * Hans de Goede <hdegoede@redhat.com>
29 */
30#ifndef __VBOX_DRV_H__
31#define __VBOX_DRV_H__
32
33#include <linux/genalloc.h>
34#include <linux/io.h>
35#include <linux/string.h>
36#include <linux/version.h>
37
38#include <drm/drmP.h>
39#include <drm/drm_encoder.h>
40#include <drm/drm_fb_helper.h>
41#include <drm/drm_gem.h>
42
43#include <drm/ttm/ttm_bo_api.h>
44#include <drm/ttm/ttm_bo_driver.h>
45#include <drm/ttm/ttm_placement.h>
46#include <drm/ttm/ttm_memory.h>
47#include <drm/ttm/ttm_module.h>
48
49#include "vboxvideo_guest.h"
50#include "vboxvideo_vbe.h"
51#include "hgsmi_ch_setup.h"
52
53#define DRIVER_NAME "vboxvideo"
54#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
55#define DRIVER_DATE "20130823"
56
57#define DRIVER_MAJOR 1
58#define DRIVER_MINOR 0
59#define DRIVER_PATCHLEVEL 0
60
61#define VBOX_MAX_CURSOR_WIDTH 64
62#define VBOX_MAX_CURSOR_HEIGHT 64
63#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
64#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
65
66#define VBOX_MAX_SCREENS 32
67
68#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \
69 VBVA_ADAPTER_INFORMATION_SIZE)
70#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE
71#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \
72 sizeof(struct hgsmi_host_flags))
73#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
74
75struct vbox_fbdev;
76
/* Per-device driver state, stored in drm_device::dev_private. */
struct vbox_private {
	struct drm_device *dev;

	/* ioremapped guest heap at the end of VRAM, see GUEST_HEAP_OFFSET(). */
	u8 __iomem *guest_heap;
	/* ioremapped per-CRTC VBVA command buffers. */
	u8 __iomem *vbva_buffers;
	/* Allocator for HGSMI request buffers inside the guest heap. */
	struct gen_pool *guest_pool;
	struct vbva_buf_ctx *vbva_info;
	/* True if the host accepts arbitrary scan-line pitches. */
	bool any_pitch;
	u32 num_crtcs;
	/** Amount of available VRAM, including space used for buffers. */
	u32 full_vram_size;
	/** Amount of available VRAM, not including space used for buffers. */
	u32 available_vram_size;
	/** Array of structures for receiving mode hints. */
	struct vbva_modehint *last_mode_hints;

	struct vbox_fbdev *fbdev;

	int fb_mtrr;

	/* TTM memory-manager state. */
	struct {
		struct drm_global_reference mem_global_ref;
		struct ttm_bo_global_ref bo_global_ref;
		struct ttm_bo_device bdev;
	} ttm;

	struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
	/**
	 * We decide whether or not user-space supports display hot-plug
	 * depending on whether they react to a hot-plug event after the initial
	 * mode query.
	 */
	bool initial_mode_queried;
	struct work_struct hotplug_work;
	u32 input_mapping_width;
	u32 input_mapping_height;
	/**
	 * Is user-space using an X.Org-style layout of one large frame-buffer
	 * encompassing all screen ones or is the fbdev console active?
	 */
	bool single_framebuffer;
	/* Cached host cursor state, see vbox_mode.c. */
	u32 cursor_width;
	u32 cursor_height;
	u32 cursor_hot_x;
	u32 cursor_hot_y;
	size_t cursor_data_size;
	u8 cursor_data[CURSOR_DATA_SIZE];
};
125
126#undef CURSOR_PIXEL_COUNT
127#undef CURSOR_DATA_SIZE
128
129int vbox_driver_load(struct drm_device *dev, unsigned long flags);
130void vbox_driver_unload(struct drm_device *dev);
131void vbox_driver_lastclose(struct drm_device *dev);
132
133struct vbox_gem_object;
134
/* DRM connector plus the most recent size/state hint from the host. */
struct vbox_connector {
	struct drm_connector base;
	char name[32];
	struct vbox_crtc *vbox_crtc;
	/* Last mode hint received from the host for this connector. */
	struct {
		u16 width;
		u16 height;
		bool disconnected;
	} mode_hint;
};

/* DRM CRTC plus per-screen VirtualBox state. */
struct vbox_crtc {
	struct drm_crtc base;
	bool blanked;
	bool disconnected;
	unsigned int crtc_id;
	/* Offset of this screen's scanout buffer within VRAM. */
	u32 fb_offset;
	bool cursor_enabled;
	/* Suggested screen position from the most recent host mode hint. */
	u16 x_hint;
	u16 y_hint;
};

/* Plain DRM encoder; no driver-specific state needed. */
struct vbox_encoder {
	struct drm_encoder base;
};

/* DRM framebuffer backed by a single GEM object. */
struct vbox_framebuffer {
	struct drm_framebuffer base;
	struct drm_gem_object *obj;
};

/* fbdev emulation state (console framebuffer). */
struct vbox_fbdev {
	struct drm_fb_helper helper;
	struct vbox_framebuffer afb;
	int size;
	struct ttm_bo_kmap_obj mapping;
	int x1, y1, x2, y2;	/* dirty rect */
	spinlock_t dirty_lock;	/* protects the dirty rect above */
};
174
175#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
176#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
177#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
178#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
179
180int vbox_mode_init(struct drm_device *dev);
181void vbox_mode_fini(struct drm_device *dev);
182
183#define DRM_MODE_FB_CMD drm_mode_fb_cmd2
184#define CRTC_FB(crtc) ((crtc)->primary->fb)
185
186void vbox_enable_accel(struct vbox_private *vbox);
187void vbox_disable_accel(struct vbox_private *vbox);
188void vbox_report_caps(struct vbox_private *vbox);
189
190void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
191 struct drm_clip_rect *rects,
192 unsigned int num_rects);
193
194int vbox_framebuffer_init(struct drm_device *dev,
195 struct vbox_framebuffer *vbox_fb,
196 const struct DRM_MODE_FB_CMD *mode_cmd,
197 struct drm_gem_object *obj);
198
199int vbox_fbdev_init(struct drm_device *dev);
200void vbox_fbdev_fini(struct drm_device *dev);
201void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr);
202
/* A TTM buffer object wrapped in a GEM object, with placement state. */
struct vbox_bo {
	struct ttm_buffer_object bo;
	struct ttm_placement placement;
	struct ttm_bo_kmap_obj kmap;
	struct drm_gem_object gem;
	/* Up to three placement choices, filled by vbox_ttm_placement(). */
	struct ttm_place placements[3];
	/* Nesting count for vbox_bo_pin()/vbox_bo_unpin(). */
	int pin_count;
};
211
212#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)
213
214static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
215{
216 return container_of(bo, struct vbox_bo, bo);
217}
218
219#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
220
221int vbox_dumb_create(struct drm_file *file,
222 struct drm_device *dev,
223 struct drm_mode_create_dumb *args);
224
225void vbox_gem_free_object(struct drm_gem_object *obj);
226int vbox_dumb_mmap_offset(struct drm_file *file,
227 struct drm_device *dev,
228 u32 handle, u64 *offset);
229
230#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT)
231
232int vbox_mm_init(struct vbox_private *vbox);
233void vbox_mm_fini(struct vbox_private *vbox);
234
235int vbox_bo_create(struct drm_device *dev, int size, int align,
236 u32 flags, struct vbox_bo **pvboxbo);
237
238int vbox_gem_create(struct drm_device *dev,
239 u32 size, bool iskernel, struct drm_gem_object **obj);
240
241int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr);
242int vbox_bo_unpin(struct vbox_bo *bo);
243
244static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
245{
246 int ret;
247
248 ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
249 if (ret) {
250 if (ret != -ERESTARTSYS && ret != -EBUSY)
251 DRM_ERROR("reserve failed %p\n", bo);
252 return ret;
253 }
254 return 0;
255}
256
/* Release a reservation taken with vbox_bo_reserve(). */
static inline void vbox_bo_unreserve(struct vbox_bo *bo)
{
	ttm_bo_unreserve(&bo->bo);
}
261
262void vbox_ttm_placement(struct vbox_bo *bo, int domain);
263int vbox_bo_push_sysram(struct vbox_bo *bo);
264int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
265
266/* vbox_prime.c */
267int vbox_gem_prime_pin(struct drm_gem_object *obj);
268void vbox_gem_prime_unpin(struct drm_gem_object *obj);
269struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
270struct drm_gem_object *vbox_gem_prime_import_sg_table(
271 struct drm_device *dev, struct dma_buf_attachment *attach,
272 struct sg_table *table);
273void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
274void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
275int vbox_gem_prime_mmap(struct drm_gem_object *obj,
276 struct vm_area_struct *area);
277
278/* vbox_irq.c */
279int vbox_irq_init(struct vbox_private *vbox);
280void vbox_irq_fini(struct vbox_private *vbox);
281void vbox_report_hotplug(struct vbox_private *vbox);
282irqreturn_t vbox_irq_handler(int irq, void *arg);
283
284/* vbox_hgsmi.c */
285void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
286 u8 channel, u16 channel_info);
287void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf);
288int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf);
289
/*
 * Program one VBE DISPI register: write the register index to the index
 * I/O port, then the value to the data port.
 */
static inline void vbox_write_ioport(u16 index, u16 data)
{
	outw(index, VBE_DISPI_IOPORT_INDEX);
	outw(data, VBE_DISPI_IOPORT_DATA);
}
295
296#endif
diff --git a/drivers/staging/vboxvideo/vbox_err.h b/drivers/staging/vboxvideo/vbox_err.h
new file mode 100644
index 000000000000..562db8630eb0
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_err.h
@@ -0,0 +1,50 @@
1/*
2 * Copyright (C) 2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VBOX_ERR_H__
24#define __VBOX_ERR_H__
25
26/**
27 * @name VirtualBox virtual-hardware error macros
28 * @{
29 */
30
/* VirtualBox IPRT status codes: success is >= 0, errors are negative. */
#define VINF_SUCCESS                        0
#define VERR_INVALID_PARAMETER              (-2)
#define VERR_INVALID_POINTER                (-6)
#define VERR_NO_MEMORY                      (-8)
#define VERR_NOT_IMPLEMENTED                (-12)
#define VERR_INVALID_FUNCTION               (-36)
#define VERR_NOT_SUPPORTED                  (-37)
#define VERR_TOO_MUCH_DATA                  (-42)
#define VERR_INVALID_STATE                  (-79)
#define VERR_OUT_OF_RESOURCES               (-80)
#define VERR_ALREADY_EXISTS                 (-105)
#define VERR_INTERNAL_ERROR                 (-225)

/* Success/failure predicates; _NP is the non-predicting ("no profile") form. */
#define RT_SUCCESS_NP(rc)   ((int)(rc) >= VINF_SUCCESS)
#define RT_SUCCESS(rc)      (likely(RT_SUCCESS_NP(rc)))
#define RT_FAILURE(rc)      (unlikely(!RT_SUCCESS_NP(rc)))
47
48/** @} */
49
50#endif
diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c
new file mode 100644
index 000000000000..35f6d9f8c203
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_fb.c
@@ -0,0 +1,412 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_fb.c
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 * Authors: Dave Airlie <airlied@redhat.com>
 * Michael Thayer <michael.thayer@oracle.com>
28 */
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/mm.h>
34#include <linux/tty.h>
35#include <linux/sysrq.h>
36#include <linux/delay.h>
37#include <linux/fb.h>
38#include <linux/init.h>
39
40#include <drm/drmP.h>
41#include <drm/drm_crtc.h>
42#include <drm/drm_fb_helper.h>
43#include <drm/drm_crtc_helper.h>
44
45#include "vbox_drv.h"
46#include "vboxvideo.h"
47
48#define VBOX_DIRTY_DELAY (HZ / 30)
49/**
50 * Tell the host about dirty rectangles to update.
51 */
/**
 * Tell the host about dirty rectangles to update.
 *
 * Merges the given rectangle with any damage stored from earlier calls.
 * If the backing BO cannot be reserved right now (it is being moved, or
 * we cannot sleep), the merged rectangle is stored for a later call
 * instead of being reported immediately.
 */
static void vbox_dirty_update(struct vbox_fbdev *fbdev,
			      int x, int y, int width, int height)
{
	struct drm_gem_object *obj;
	struct vbox_bo *bo;
	int ret = -EBUSY;	/* default: behave as "busy" if we can't sleep */
	bool store_for_later = false;
	int x2, y2;
	unsigned long flags;
	struct drm_clip_rect rect;

	obj = fbdev->afb.obj;
	bo = gem_to_vbox_bo(obj);

	/*
	 * try and reserve the BO, if we fail with busy
	 * then the BO is being moved and we should
	 * store up the damage until later.
	 */
	if (drm_can_sleep())
		ret = vbox_bo_reserve(bo, true);
	if (ret) {
		if (ret != -EBUSY)
			return;

		store_for_later = true;
	}

	x2 = x + width - 1;
	y2 = y + height - 1;
	spin_lock_irqsave(&fbdev->dirty_lock, flags);

	/* Grow the rectangle to cover any previously stored damage. */
	if (fbdev->y1 < y)
		y = fbdev->y1;
	if (fbdev->y2 > y2)
		y2 = fbdev->y2;
	if (fbdev->x1 < x)
		x = fbdev->x1;
	if (fbdev->x2 > x2)
		x2 = fbdev->x2;

	if (store_for_later) {
		fbdev->x1 = x;
		fbdev->x2 = x2;
		fbdev->y1 = y;
		fbdev->y2 = y2;
		spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
		return;
	}

	/* Reset the stored rect to "empty" before reporting. */
	fbdev->x1 = INT_MAX;
	fbdev->y1 = INT_MAX;
	fbdev->x2 = 0;
	fbdev->y2 = 0;

	spin_unlock_irqrestore(&fbdev->dirty_lock, flags);

	/*
	 * Not sure why the original code subtracted 1 here, but I will keep
	 * it that way to avoid unnecessary differences.
	 */
	rect.x1 = x;
	rect.x2 = x2 + 1;
	rect.y1 = y;
	rect.y2 = y2 + 1;
	vbox_framebuffer_dirty_rectangles(&fbdev->afb.base, &rect, 1);

	vbox_bo_unreserve(bo);
}
121
122#ifdef CONFIG_FB_DEFERRED_IO
123static void vbox_deferred_io(struct fb_info *info, struct list_head *pagelist)
124{
125 struct vbox_fbdev *fbdev = info->par;
126 unsigned long start, end, min, max;
127 struct page *page;
128 int y1, y2;
129
130 min = ULONG_MAX;
131 max = 0;
132 list_for_each_entry(page, pagelist, lru) {
133 start = page->index << PAGE_SHIFT;
134 end = start + PAGE_SIZE - 1;
135 min = min(min, start);
136 max = max(max, end);
137 }
138
139 if (min < max) {
140 y1 = min / info->fix.line_length;
141 y2 = (max / info->fix.line_length) + 1;
142 DRM_INFO("%s: Calling dirty update: 0, %d, %d, %d\n",
143 __func__, y1, info->var.xres, y2 - y1 - 1);
144 vbox_dirty_update(fbdev, 0, y1, info->var.xres, y2 - y1 - 1);
145 }
146}
147
/* Deferred-I/O parameters: flush dirty pages at most ~30 times per second. */
static struct fb_deferred_io vbox_defio = {
	.delay = VBOX_DIRTY_DELAY,
	.deferred_io = vbox_deferred_io,
};
152#endif
153
154static void vbox_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
155{
156 struct vbox_fbdev *fbdev = info->par;
157
158 sys_fillrect(info, rect);
159 vbox_dirty_update(fbdev, rect->dx, rect->dy, rect->width, rect->height);
160}
161
162static void vbox_copyarea(struct fb_info *info, const struct fb_copyarea *area)
163{
164 struct vbox_fbdev *fbdev = info->par;
165
166 sys_copyarea(info, area);
167 vbox_dirty_update(fbdev, area->dx, area->dy, area->width, area->height);
168}
169
170static void vbox_imageblit(struct fb_info *info, const struct fb_image *image)
171{
172 struct vbox_fbdev *fbdev = info->par;
173
174 sys_imageblit(info, image);
175 vbox_dirty_update(fbdev, image->dx, image->dy, image->width,
176 image->height);
177}
178
/* fbdev ops: system-memory drawing helpers wrapped with dirty reporting. */
static struct fb_ops vboxfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = vbox_fillrect,
	.fb_copyarea = vbox_copyarea,
	.fb_imageblit = vbox_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};
192
193static int vboxfb_create_object(struct vbox_fbdev *fbdev,
194 struct DRM_MODE_FB_CMD *mode_cmd,
195 struct drm_gem_object **gobj_p)
196{
197 struct drm_device *dev = fbdev->helper.dev;
198 u32 size;
199 struct drm_gem_object *gobj;
200 u32 pitch = mode_cmd->pitches[0];
201 int ret;
202
203 size = pitch * mode_cmd->height;
204 ret = vbox_gem_create(dev, size, true, &gobj);
205 if (ret)
206 return ret;
207
208 *gobj_p = gobj;
209
210 return 0;
211}
212
213static int vboxfb_create(struct drm_fb_helper *helper,
214 struct drm_fb_helper_surface_size *sizes)
215{
216 struct vbox_fbdev *fbdev =
217 container_of(helper, struct vbox_fbdev, helper);
218 struct drm_device *dev = fbdev->helper.dev;
219 struct DRM_MODE_FB_CMD mode_cmd;
220 struct drm_framebuffer *fb;
221 struct fb_info *info;
222 struct device *device = &dev->pdev->dev;
223 struct drm_gem_object *gobj;
224 struct vbox_bo *bo;
225 int size, ret;
226 u32 pitch;
227
228 mode_cmd.width = sizes->surface_width;
229 mode_cmd.height = sizes->surface_height;
230 pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
231 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
232 sizes->surface_depth);
233 mode_cmd.pitches[0] = pitch;
234
235 size = pitch * mode_cmd.height;
236
237 ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj);
238 if (ret) {
239 DRM_ERROR("failed to create fbcon backing object %d\n", ret);
240 return ret;
241 }
242
243 ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj);
244 if (ret)
245 return ret;
246
247 bo = gem_to_vbox_bo(gobj);
248
249 ret = vbox_bo_reserve(bo, false);
250 if (ret)
251 return ret;
252
253 ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL);
254 if (ret) {
255 vbox_bo_unreserve(bo);
256 return ret;
257 }
258
259 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
260 vbox_bo_unreserve(bo);
261 if (ret) {
262 DRM_ERROR("failed to kmap fbcon\n");
263 return ret;
264 }
265
266 info = framebuffer_alloc(0, device);
267 if (!info)
268 return -ENOMEM;
269 info->par = fbdev;
270
271 fbdev->size = size;
272
273 fb = &fbdev->afb.base;
274 fbdev->helper.fb = fb;
275 fbdev->helper.fbdev = info;
276
277 strcpy(info->fix.id, "vboxdrmfb");
278
279 /*
280 * The last flag forces a mode set on VT switches even if the kernel
281 * does not think it is needed.
282 */
283 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT |
284 FBINFO_MISC_ALWAYS_SETPAR;
285 info->fbops = &vboxfb_ops;
286
287 ret = fb_alloc_cmap(&info->cmap, 256, 0);
288 if (ret)
289 return -ENOMEM;
290
291 /*
292 * This seems to be done for safety checking that the framebuffer
293 * is not registered twice by different drivers.
294 */
295 info->apertures = alloc_apertures(1);
296 if (!info->apertures)
297 return -ENOMEM;
298 info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
299 info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
300
301 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
302 drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
303 sizes->fb_height);
304
305 info->screen_base = bo->kmap.virtual;
306 info->screen_size = size;
307
308#ifdef CONFIG_FB_DEFERRED_IO
309 info->fbdefio = &vbox_defio;
310 fb_deferred_io_init(info);
311#endif
312
313 info->pixmap.flags = FB_PIXMAP_SYSTEM;
314
315 DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
316
317 return 0;
318}
319
320static void vbox_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
321 u16 blue, int regno)
322{
323}
324
325static void vbox_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
326 u16 *blue, int regno)
327{
328 *red = regno;
329 *green = regno;
330 *blue = regno;
331}
332
333static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
334 .gamma_set = vbox_fb_gamma_set,
335 .gamma_get = vbox_fb_gamma_get,
336 .fb_probe = vboxfb_create,
337};
338
339void vbox_fbdev_fini(struct drm_device *dev)
340{
341 struct vbox_private *vbox = dev->dev_private;
342 struct vbox_fbdev *fbdev = vbox->fbdev;
343 struct vbox_framebuffer *afb = &fbdev->afb;
344
345 drm_fb_helper_unregister_fbi(&fbdev->helper);
346
347 if (afb->obj) {
348 struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);
349
350 if (!vbox_bo_reserve(bo, false)) {
351 if (bo->kmap.virtual)
352 ttm_bo_kunmap(&bo->kmap);
353 /*
354 * QXL does this, but is it really needed before
355 * freeing?
356 */
357 if (bo->pin_count)
358 vbox_bo_unpin(bo);
359 vbox_bo_unreserve(bo);
360 }
361 drm_gem_object_unreference_unlocked(afb->obj);
362 afb->obj = NULL;
363 }
364 drm_fb_helper_fini(&fbdev->helper);
365
366 drm_framebuffer_unregister_private(&afb->base);
367 drm_framebuffer_cleanup(&afb->base);
368}
369
370int vbox_fbdev_init(struct drm_device *dev)
371{
372 struct vbox_private *vbox = dev->dev_private;
373 struct vbox_fbdev *fbdev;
374 int ret;
375
376 fbdev = devm_kzalloc(dev->dev, sizeof(*fbdev), GFP_KERNEL);
377 if (!fbdev)
378 return -ENOMEM;
379
380 vbox->fbdev = fbdev;
381 spin_lock_init(&fbdev->dirty_lock);
382
383 drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
384 ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs);
385 if (ret)
386 return ret;
387
388 ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
389 if (ret)
390 goto err_fini;
391
392 /* disable all the possible outputs/crtcs before entering KMS mode */
393 drm_helper_disable_unused_functions(dev);
394
395 ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
396 if (ret)
397 goto err_fini;
398
399 return 0;
400
401err_fini:
402 drm_fb_helper_fini(&fbdev->helper);
403 return ret;
404}
405
406void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr)
407{
408 struct fb_info *fbdev = vbox->fbdev->helper.fbdev;
409
410 fbdev->fix.smem_start = fbdev->apertures->ranges[0].base + gpu_addr;
411 fbdev->fix.smem_len = vbox->available_vram_size - gpu_addr;
412}
diff --git a/drivers/staging/vboxvideo/vbox_hgsmi.c b/drivers/staging/vboxvideo/vbox_hgsmi.c
new file mode 100644
index 000000000000..822fd31121cb
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_hgsmi.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) 2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice (including the
21 * next paragraph) shall be included in all copies or substantial portions
22 * of the Software.
23 *
24 * Authors: Hans de Goede <hdegoede@redhat.com>
25 */
26
27#include "vbox_drv.h"
28#include "vboxvideo_vbe.h"
29#include "hgsmi_defs.h"
30
31/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */
32static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size)
33{
34 while (size--) {
35 hash += *data++;
36 hash += (hash << 10);
37 hash ^= (hash >> 6);
38 }
39
40 return hash;
41}
42
43static u32 hgsmi_hash_end(u32 hash)
44{
45 hash += (hash << 3);
46 hash ^= (hash >> 11);
47 hash += (hash << 15);
48
49 return hash;
50}
51
52/* Not really a checksum but that is the naming used in all vbox code */
53static u32 hgsmi_checksum(u32 offset,
54 const struct hgsmi_buffer_header *header,
55 const struct hgsmi_buffer_tail *tail)
56{
57 u32 checksum;
58
59 checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset));
60 checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header));
61 /* 4 -> Do not checksum the checksum itself */
62 checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4);
63
64 return hgsmi_hash_end(checksum);
65}
66
67void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
68 u8 channel, u16 channel_info)
69{
70 struct hgsmi_buffer_header *h;
71 struct hgsmi_buffer_tail *t;
72 size_t total_size;
73 dma_addr_t offset;
74
75 total_size = size + sizeof(*h) + sizeof(*t);
76 h = gen_pool_dma_alloc(guest_pool, total_size, &offset);
77 if (!h)
78 return NULL;
79
80 t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size);
81
82 h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
83 h->data_size = size;
84 h->channel = channel;
85 h->channel_info = channel_info;
86 memset(&h->u.header_data, 0, sizeof(h->u.header_data));
87
88 t->reserved = 0;
89 t->checksum = hgsmi_checksum(offset, h, t);
90
91 return (u8 *)h + sizeof(*h);
92}
93
94void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
95{
96 struct hgsmi_buffer_header *h =
97 (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h));
98 size_t total_size = h->data_size + sizeof(*h) +
99 sizeof(struct hgsmi_buffer_tail);
100
101 gen_pool_free(guest_pool, (unsigned long)h, total_size);
102}
103
104int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
105{
106 phys_addr_t offset;
107
108 offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
109 sizeof(struct hgsmi_buffer_header));
110 outl(offset, VGA_PORT_HGSMI_GUEST);
111 /* Make the compiler aware that the host has changed memory. */
112 mb();
113
114 return 0;
115}
diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c
new file mode 100644
index 000000000000..3ca8bec62ac4
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_irq.c
@@ -0,0 +1,197 @@
1/*
2 * Copyright (C) 2016-2017 Oracle Corporation
3 * This file is based on qxl_irq.c
4 * Copyright 2013 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alon Levy
26 * Michael Thayer <michael.thayer@oracle.com,
27 * Hans de Goede <hdegoede@redhat.com>
28 */
29
30#include <drm/drm_crtc_helper.h>
31
32#include "vbox_drv.h"
33#include "vboxvideo.h"
34
35static void vbox_clear_irq(void)
36{
37 outl((u32)~0, VGA_PORT_HGSMI_HOST);
38}
39
40static u32 vbox_get_flags(struct vbox_private *vbox)
41{
42 return readl(vbox->guest_heap + HOST_FLAGS_OFFSET);
43}
44
45void vbox_report_hotplug(struct vbox_private *vbox)
46{
47 schedule_work(&vbox->hotplug_work);
48}
49
50irqreturn_t vbox_irq_handler(int irq, void *arg)
51{
52 struct drm_device *dev = (struct drm_device *)arg;
53 struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
54 u32 host_flags = vbox_get_flags(vbox);
55
56 if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
57 return IRQ_NONE;
58
59 /*
60 * Due to a bug in the initial host implementation of hot-plug irqs,
61 * the hot-plug and cursor capability flags were never cleared.
62 * Fortunately we can tell when they would have been set by checking
63 * that the VSYNC flag is not set.
64 */
65 if (host_flags &
66 (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) &&
67 !(host_flags & HGSMIHOSTFLAGS_VSYNC))
68 vbox_report_hotplug(vbox);
69
70 vbox_clear_irq();
71
72 return IRQ_HANDLED;
73}
74
75/**
76 * Check that the position hints provided by the host are suitable for GNOME
77 * shell (i.e. all screens disjoint and hints for all enabled screens) and if
78 * not replace them with default ones. Providing valid hints improves the
79 * chances that we will get a known screen layout for pointer mapping.
80 */
81static void validate_or_set_position_hints(struct vbox_private *vbox)
82{
83 struct vbva_modehint *hintsi, *hintsj;
84 bool valid = true;
85 u16 currentx = 0;
86 int i, j;
87
88 for (i = 0; i < vbox->num_crtcs; ++i) {
89 for (j = 0; j < i; ++j) {
90 hintsi = &vbox->last_mode_hints[i];
91 hintsj = &vbox->last_mode_hints[j];
92
93 if (hintsi->enabled && hintsj->enabled) {
94 if (hintsi->dx >= 0xffff ||
95 hintsi->dy >= 0xffff ||
96 hintsj->dx >= 0xffff ||
97 hintsj->dy >= 0xffff ||
98 (hintsi->dx <
99 hintsj->dx + (hintsj->cx & 0x8fff) &&
100 hintsi->dx + (hintsi->cx & 0x8fff) >
101 hintsj->dx) ||
102 (hintsi->dy <
103 hintsj->dy + (hintsj->cy & 0x8fff) &&
104 hintsi->dy + (hintsi->cy & 0x8fff) >
105 hintsj->dy))
106 valid = false;
107 }
108 }
109 }
110 if (!valid)
111 for (i = 0; i < vbox->num_crtcs; ++i) {
112 if (vbox->last_mode_hints[i].enabled) {
113 vbox->last_mode_hints[i].dx = currentx;
114 vbox->last_mode_hints[i].dy = 0;
115 currentx +=
116 vbox->last_mode_hints[i].cx & 0x8fff;
117 }
118 }
119}
120
121/**
122 * Query the host for the most recent video mode hints.
123 */
124static void vbox_update_mode_hints(struct vbox_private *vbox)
125{
126 struct drm_device *dev = vbox->dev;
127 struct drm_connector *connector;
128 struct vbox_connector *vbox_conn;
129 struct vbva_modehint *hints;
130 u16 flags;
131 bool disconnected;
132 unsigned int crtc_id;
133 int ret;
134
135 ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs,
136 vbox->last_mode_hints);
137 if (ret) {
138 DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret);
139 return;
140 }
141
142 validate_or_set_position_hints(vbox);
143 drm_modeset_lock_all(dev);
144 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
145 vbox_conn = to_vbox_connector(connector);
146
147 hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id];
148 if (hints->magic != VBVAMODEHINT_MAGIC)
149 continue;
150
151 disconnected = !(hints->enabled);
152 crtc_id = vbox_conn->vbox_crtc->crtc_id;
153 vbox_conn->mode_hint.width = hints->cx & 0x8fff;
154 vbox_conn->mode_hint.height = hints->cy & 0x8fff;
155 vbox_conn->vbox_crtc->x_hint = hints->dx;
156 vbox_conn->vbox_crtc->y_hint = hints->dy;
157 vbox_conn->mode_hint.disconnected = disconnected;
158
159 if (vbox_conn->vbox_crtc->disconnected == disconnected)
160 continue;
161
162 if (disconnected)
163 flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
164 else
165 flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK;
166
167 hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0,
168 hints->cx * 4, hints->cx,
169 hints->cy, 0, flags);
170
171 vbox_conn->vbox_crtc->disconnected = disconnected;
172 }
173 drm_modeset_unlock_all(dev);
174}
175
176static void vbox_hotplug_worker(struct work_struct *work)
177{
178 struct vbox_private *vbox = container_of(work, struct vbox_private,
179 hotplug_work);
180
181 vbox_update_mode_hints(vbox);
182 drm_kms_helper_hotplug_event(vbox->dev);
183}
184
185int vbox_irq_init(struct vbox_private *vbox)
186{
187 INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
188 vbox_update_mode_hints(vbox);
189
190 return drm_irq_install(vbox->dev, vbox->dev->pdev->irq);
191}
192
193void vbox_irq_fini(struct vbox_private *vbox)
194{
195 drm_irq_uninstall(vbox->dev);
196 flush_work(&vbox->hotplug_work);
197}
diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c
new file mode 100644
index 000000000000..d0c6ec75a3c7
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_main.c
@@ -0,0 +1,534 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_main.c
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 * Authors: Dave Airlie <airlied@redhat.com>,
27 * Michael Thayer <michael.thayer@oracle.com,
28 * Hans de Goede <hdegoede@redhat.com>
29 */
30#include <drm/drm_fb_helper.h>
31#include <drm/drm_crtc_helper.h>
32
33#include "vbox_drv.h"
34#include "vbox_err.h"
35#include "vboxvideo_guest.h"
36#include "vboxvideo_vbe.h"
37
38static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
39{
40 struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
41
42 if (vbox_fb->obj)
43 drm_gem_object_unreference_unlocked(vbox_fb->obj);
44
45 drm_framebuffer_cleanup(fb);
46 kfree(fb);
47}
48
49void vbox_enable_accel(struct vbox_private *vbox)
50{
51 unsigned int i;
52 struct vbva_buffer *vbva;
53
54 if (!vbox->vbva_info || !vbox->vbva_buffers) {
55 /* Should never happen... */
56 DRM_ERROR("vboxvideo: failed to set up VBVA.\n");
57 return;
58 }
59
60 for (i = 0; i < vbox->num_crtcs; ++i) {
61 if (vbox->vbva_info[i].vbva)
62 continue;
63
64 vbva = (void *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE;
65 if (!vbva_enable(&vbox->vbva_info[i],
66 vbox->guest_pool, vbva, i)) {
67 /* very old host or driver error. */
68 DRM_ERROR("vboxvideo: vbva_enable failed\n");
69 return;
70 }
71 }
72}
73
74void vbox_disable_accel(struct vbox_private *vbox)
75{
76 unsigned int i;
77
78 for (i = 0; i < vbox->num_crtcs; ++i)
79 vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
80}
81
82void vbox_report_caps(struct vbox_private *vbox)
83{
84 u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
85 VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
86
87 if (vbox->initial_mode_queried)
88 caps |= VBVACAPS_VIDEO_MODE_HINTS;
89
90 hgsmi_send_caps_info(vbox->guest_pool, caps);
91}
92
93/**
94 * Send information about dirty rectangles to VBVA. If necessary we enable
95 * VBVA first, as this is normally disabled after a change of master in case
96 * the new master does not send dirty rectangle information (is this even
97 * allowed?)
98 */
99void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
100 struct drm_clip_rect *rects,
101 unsigned int num_rects)
102{
103 struct vbox_private *vbox = fb->dev->dev_private;
104 struct drm_crtc *crtc;
105 unsigned int i;
106
107 mutex_lock(&vbox->hw_mutex);
108 list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
109 if (CRTC_FB(crtc) != fb)
110 continue;
111
112 vbox_enable_accel(vbox);
113
114 for (i = 0; i < num_rects; ++i) {
115 struct vbva_cmd_hdr cmd_hdr;
116 unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
117
118 if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) ||
119 (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) ||
120 (rects[i].x2 < crtc->x) ||
121 (rects[i].y2 < crtc->y))
122 continue;
123
124 cmd_hdr.x = (s16)rects[i].x1;
125 cmd_hdr.y = (s16)rects[i].y1;
126 cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
127 cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
128
129 if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
130 vbox->guest_pool))
131 continue;
132
133 vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
134 &cmd_hdr, sizeof(cmd_hdr));
135 vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
136 }
137 }
138 mutex_unlock(&vbox->hw_mutex);
139}
140
141static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
142 struct drm_file *file_priv,
143 unsigned int flags, unsigned int color,
144 struct drm_clip_rect *rects,
145 unsigned int num_rects)
146{
147 vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
148
149 return 0;
150}
151
152static const struct drm_framebuffer_funcs vbox_fb_funcs = {
153 .destroy = vbox_user_framebuffer_destroy,
154 .dirty = vbox_user_framebuffer_dirty,
155};
156
157int vbox_framebuffer_init(struct drm_device *dev,
158 struct vbox_framebuffer *vbox_fb,
159 const struct DRM_MODE_FB_CMD *mode_cmd,
160 struct drm_gem_object *obj)
161{
162 int ret;
163
164 drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd);
165 vbox_fb->obj = obj;
166 ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
167 if (ret) {
168 DRM_ERROR("framebuffer init failed %d\n", ret);
169 return ret;
170 }
171
172 return 0;
173}
174
175static struct drm_framebuffer *vbox_user_framebuffer_create(
176 struct drm_device *dev,
177 struct drm_file *filp,
178 const struct drm_mode_fb_cmd2 *mode_cmd)
179{
180 struct drm_gem_object *obj;
181 struct vbox_framebuffer *vbox_fb;
182 int ret = -ENOMEM;
183
184 obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
185 if (!obj)
186 return ERR_PTR(-ENOENT);
187
188 vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
189 if (!vbox_fb)
190 goto err_unref_obj;
191
192 ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
193 if (ret)
194 goto err_free_vbox_fb;
195
196 return &vbox_fb->base;
197
198err_free_vbox_fb:
199 kfree(vbox_fb);
200err_unref_obj:
201 drm_gem_object_unreference_unlocked(obj);
202 return ERR_PTR(ret);
203}
204
205static const struct drm_mode_config_funcs vbox_mode_funcs = {
206 .fb_create = vbox_user_framebuffer_create,
207};
208
209static int vbox_accel_init(struct vbox_private *vbox)
210{
211 unsigned int i;
212
213 vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
214 sizeof(*vbox->vbva_info), GFP_KERNEL);
215 if (!vbox->vbva_info)
216 return -ENOMEM;
217
218 /* Take a command buffer for each screen from the end of usable VRAM. */
219 vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
220
221 vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0,
222 vbox->available_vram_size,
223 vbox->num_crtcs *
224 VBVA_MIN_BUFFER_SIZE);
225 if (!vbox->vbva_buffers)
226 return -ENOMEM;
227
228 for (i = 0; i < vbox->num_crtcs; ++i)
229 vbva_setup_buffer_context(&vbox->vbva_info[i],
230 vbox->available_vram_size +
231 i * VBVA_MIN_BUFFER_SIZE,
232 VBVA_MIN_BUFFER_SIZE);
233
234 return 0;
235}
236
237static void vbox_accel_fini(struct vbox_private *vbox)
238{
239 vbox_disable_accel(vbox);
240 pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers);
241}
242
243/** Do we support the 4.3 plus mode hint reporting interface? */
244static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
245{
246 u32 have_hints, have_cursor;
247 int ret;
248
249 ret = hgsmi_query_conf(vbox->guest_pool,
250 VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
251 &have_hints);
252 if (ret)
253 return false;
254
255 ret = hgsmi_query_conf(vbox->guest_pool,
256 VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
257 &have_cursor);
258 if (ret)
259 return false;
260
261 return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
262}
263
264static bool vbox_check_supported(u16 id)
265{
266 u16 dispi_id;
267
268 vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
269 dispi_id = inw(VBE_DISPI_IOPORT_DATA);
270
271 return dispi_id == id;
272}
273
274/**
275 * Set up our heaps and data exchange buffers in VRAM before handing the rest
276 * to the memory manager.
277 */
278static int vbox_hw_init(struct vbox_private *vbox)
279{
280 int ret = -ENOMEM;
281
282 vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
283 vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);
284
285 DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
286
287 /* Map guest-heap at end of vram */
288 vbox->guest_heap =
289 pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox),
290 GUEST_HEAP_SIZE);
291 if (!vbox->guest_heap)
292 return -ENOMEM;
293
294 /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
295 vbox->guest_pool = gen_pool_create(4, -1);
296 if (!vbox->guest_pool)
297 goto err_unmap_guest_heap;
298
299 ret = gen_pool_add_virt(vbox->guest_pool,
300 (unsigned long)vbox->guest_heap,
301 GUEST_HEAP_OFFSET(vbox),
302 GUEST_HEAP_USABLE_SIZE, -1);
303 if (ret)
304 goto err_destroy_guest_pool;
305
306 ret = hgsmi_test_query_conf(vbox->guest_pool);
307 if (ret) {
308 DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
309 goto err_destroy_guest_pool;
310 }
311
312 /* Reduce available VRAM size to reflect the guest heap. */
313 vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
314 /* Linux drm represents monitors as a 32-bit array. */
315 hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
316 &vbox->num_crtcs);
317 vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);
318
319 if (!have_hgsmi_mode_hints(vbox)) {
320 ret = -ENOTSUPP;
321 goto err_destroy_guest_pool;
322 }
323
324 vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs,
325 sizeof(struct vbva_modehint),
326 GFP_KERNEL);
327 if (!vbox->last_mode_hints) {
328 ret = -ENOMEM;
329 goto err_destroy_guest_pool;
330 }
331
332 ret = vbox_accel_init(vbox);
333 if (ret)
334 goto err_destroy_guest_pool;
335
336 return 0;
337
338err_destroy_guest_pool:
339 gen_pool_destroy(vbox->guest_pool);
340err_unmap_guest_heap:
341 pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
342 return ret;
343}
344
345static void vbox_hw_fini(struct vbox_private *vbox)
346{
347 vbox_accel_fini(vbox);
348 gen_pool_destroy(vbox->guest_pool);
349 pci_iounmap(vbox->dev->pdev, vbox->guest_heap);
350}
351
352int vbox_driver_load(struct drm_device *dev, unsigned long flags)
353{
354 struct vbox_private *vbox;
355 int ret = 0;
356
357 if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
358 return -ENODEV;
359
360 vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL);
361 if (!vbox)
362 return -ENOMEM;
363
364 dev->dev_private = vbox;
365 vbox->dev = dev;
366
367 mutex_init(&vbox->hw_mutex);
368
369 ret = vbox_hw_init(vbox);
370 if (ret)
371 return ret;
372
373 ret = vbox_mm_init(vbox);
374 if (ret)
375 goto err_hw_fini;
376
377 drm_mode_config_init(dev);
378
379 dev->mode_config.funcs = (void *)&vbox_mode_funcs;
380 dev->mode_config.min_width = 64;
381 dev->mode_config.min_height = 64;
382 dev->mode_config.preferred_depth = 24;
383 dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
384 dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
385
386 ret = vbox_mode_init(dev);
387 if (ret)
388 goto err_drm_mode_cleanup;
389
390 ret = vbox_irq_init(vbox);
391 if (ret)
392 goto err_mode_fini;
393
394 ret = vbox_fbdev_init(dev);
395 if (ret)
396 goto err_irq_fini;
397
398 return 0;
399
400err_irq_fini:
401 vbox_irq_fini(vbox);
402err_mode_fini:
403 vbox_mode_fini(dev);
404err_drm_mode_cleanup:
405 drm_mode_config_cleanup(dev);
406 vbox_mm_fini(vbox);
407err_hw_fini:
408 vbox_hw_fini(vbox);
409 return ret;
410}
411
412void vbox_driver_unload(struct drm_device *dev)
413{
414 struct vbox_private *vbox = dev->dev_private;
415
416 vbox_fbdev_fini(dev);
417 vbox_irq_fini(vbox);
418 vbox_mode_fini(dev);
419 drm_mode_config_cleanup(dev);
420 vbox_mm_fini(vbox);
421 vbox_hw_fini(vbox);
422}
423
424/**
425 * @note this is described in the DRM framework documentation. AST does not
426 * have it, but we get an oops on driver unload if it is not present.
427 */
428void vbox_driver_lastclose(struct drm_device *dev)
429{
430 struct vbox_private *vbox = dev->dev_private;
431
432 if (vbox->fbdev)
433 drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
434}
435
436int vbox_gem_create(struct drm_device *dev,
437 u32 size, bool iskernel, struct drm_gem_object **obj)
438{
439 struct vbox_bo *vboxbo;
440 int ret;
441
442 *obj = NULL;
443
444 size = roundup(size, PAGE_SIZE);
445 if (size == 0)
446 return -EINVAL;
447
448 ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
449 if (ret) {
450 if (ret != -ERESTARTSYS)
451 DRM_ERROR("failed to allocate GEM object\n");
452 return ret;
453 }
454
455 *obj = &vboxbo->gem;
456
457 return 0;
458}
459
460int vbox_dumb_create(struct drm_file *file,
461 struct drm_device *dev, struct drm_mode_create_dumb *args)
462{
463 int ret;
464 struct drm_gem_object *gobj;
465 u32 handle;
466
467 args->pitch = args->width * ((args->bpp + 7) / 8);
468 args->size = args->pitch * args->height;
469
470 ret = vbox_gem_create(dev, args->size, false, &gobj);
471 if (ret)
472 return ret;
473
474 ret = drm_gem_handle_create(file, gobj, &handle);
475 drm_gem_object_unreference_unlocked(gobj);
476 if (ret)
477 return ret;
478
479 args->handle = handle;
480
481 return 0;
482}
483
484static void vbox_bo_unref(struct vbox_bo **bo)
485{
486 struct ttm_buffer_object *tbo;
487
488 if ((*bo) == NULL)
489 return;
490
491 tbo = &((*bo)->bo);
492 ttm_bo_unref(&tbo);
493 if (!tbo)
494 *bo = NULL;
495}
496
497void vbox_gem_free_object(struct drm_gem_object *obj)
498{
499 struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
500
501 vbox_bo_unref(&vbox_bo);
502}
503
504static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
505{
506 return drm_vma_node_offset_addr(&bo->bo.vma_node);
507}
508
509int
510vbox_dumb_mmap_offset(struct drm_file *file,
511 struct drm_device *dev,
512 u32 handle, u64 *offset)
513{
514 struct drm_gem_object *obj;
515 int ret;
516 struct vbox_bo *bo;
517
518 mutex_lock(&dev->struct_mutex);
519 obj = drm_gem_object_lookup(file, handle);
520 if (!obj) {
521 ret = -ENOENT;
522 goto out_unlock;
523 }
524
525 bo = gem_to_vbox_bo(obj);
526 *offset = vbox_bo_mmap_offset(bo);
527
528 drm_gem_object_unreference(obj);
529 ret = 0;
530
531out_unlock:
532 mutex_unlock(&dev->struct_mutex);
533 return ret;
534}
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
new file mode 100644
index 000000000000..f2b85f3256fa
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -0,0 +1,877 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_mode.c
4 * Copyright 2012 Red Hat Inc.
5 * Parts based on xf86-video-ast
6 * Copyright (c) 2005 ASPEED Technology Inc.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
26 * of the Software.
27 *
28 */
29/*
30 * Authors: Dave Airlie <airlied@redhat.com>
 *          Michael Thayer <michael.thayer@oracle.com>,
32 * Hans de Goede <hdegoede@redhat.com>
33 */
34#include <linux/export.h>
35#include <drm/drm_crtc_helper.h>
36#include <drm/drm_plane_helper.h>
37
38#include "vbox_drv.h"
39#include "vboxvideo.h"
40#include "hgsmi_channels.h"
41
42static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
43 u32 handle, u32 width, u32 height,
44 s32 hot_x, s32 hot_y);
45static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y);
46
47/**
48 * Set a graphics mode. Poke any required values into registers, do an HGSMI
49 * mode set and tell the host we support advanced graphics functions.
50 */
51static void vbox_do_modeset(struct drm_crtc *crtc,
52 const struct drm_display_mode *mode)
53{
54 struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
55 struct vbox_private *vbox;
56 int width, height, bpp, pitch;
57 unsigned int crtc_id;
58 u16 flags;
59 s32 x_offset, y_offset;
60
61 vbox = crtc->dev->dev_private;
62 width = mode->hdisplay ? mode->hdisplay : 640;
63 height = mode->vdisplay ? mode->vdisplay : 480;
64 crtc_id = vbox_crtc->crtc_id;
65 bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32;
66 pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8;
67 x_offset = vbox->single_framebuffer ? crtc->x : vbox_crtc->x_hint;
68 y_offset = vbox->single_framebuffer ? crtc->y : vbox_crtc->y_hint;
69
70 /*
71 * This is the old way of setting graphics modes. It assumed one screen
72 * and a frame-buffer at the start of video RAM. On older versions of
73 * VirtualBox, certain parts of the code still assume that the first
74 * screen is programmed this way, so try to fake it.
75 */
76 if (vbox_crtc->crtc_id == 0 && crtc->enabled &&
77 vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
78 vbox_crtc->fb_offset % (bpp / 8) == 0) {
79 vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
80 vbox_write_ioport(VBE_DISPI_INDEX_YRES, height);
81 vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
82 vbox_write_ioport(VBE_DISPI_INDEX_BPP,
83 CRTC_FB(crtc)->format->cpp[0] * 8);
84 vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
85 vbox_write_ioport(
86 VBE_DISPI_INDEX_X_OFFSET,
87 vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x);
88 vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
89 vbox_crtc->fb_offset / pitch + crtc->y);
90 }
91
92 flags = VBVA_SCREEN_F_ACTIVE;
93 flags |= (crtc->enabled && !vbox_crtc->blanked) ?
94 0 : VBVA_SCREEN_F_BLANK;
95 flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
96 hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id,
97 x_offset, y_offset,
98 crtc->x * bpp / 8 + crtc->y * pitch,
99 pitch, width, height,
100 vbox_crtc->blanked ? 0 : bpp, flags);
101}
102
/*
 * Tell the host about this screen's "view" over video RAM.
 * Returns 0 on success or -ENOMEM if the HGSMI buffer cannot be allocated.
 */
static int vbox_set_view(struct drm_crtc *crtc)
{
	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
	struct vbox_private *vbox = crtc->dev->dev_private;
	struct vbva_infoview *p;

	/*
	 * Tell the host about the view. This design originally targeted the
	 * Windows XP driver architecture and assumed that each screen would
	 * have a dedicated frame buffer with the command buffer following it,
	 * the whole being a "view". The host works out which screen a command
	 * buffer belongs to by checking whether it is in the first view, then
	 * whether it is in the second and so on. The first match wins. We
	 * cheat around this by making the first view be the managed memory
	 * plus the first command buffer, the second the same plus the second
	 * buffer and so on.
	 */
	p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
			       HGSMI_CH_VBVA, VBVA_INFO_VIEW);
	if (!p)
		return -ENOMEM;

	p->view_index = vbox_crtc->crtc_id;
	p->view_offset = vbox_crtc->fb_offset;
	/* Each successive view extends one VBVA buffer further (see above). */
	p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset +
		       vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
	p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset;

	hgsmi_buffer_submit(vbox->guest_pool, p);
	hgsmi_buffer_free(vbox->guest_pool, p);

	return 0;
}
136
/* No gamma/LUT hardware on the virtual device; stub to satisfy the helpers. */
static void vbox_crtc_load_lut(struct drm_crtc *crtc)
{
}
140
141static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode)
142{
143 struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
144 struct vbox_private *vbox = crtc->dev->dev_private;
145
146 switch (mode) {
147 case DRM_MODE_DPMS_ON:
148 vbox_crtc->blanked = false;
149 break;
150 case DRM_MODE_DPMS_STANDBY:
151 case DRM_MODE_DPMS_SUSPEND:
152 case DRM_MODE_DPMS_OFF:
153 vbox_crtc->blanked = true;
154 break;
155 }
156
157 mutex_lock(&vbox->hw_mutex);
158 vbox_do_modeset(crtc, &crtc->hwmode);
159 mutex_unlock(&vbox->hw_mutex);
160}
161
/* No mode adjustment needed for the virtual device; accept every mode. */
static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc,
				 const struct drm_display_mode *mode,
				 struct drm_display_mode *adjusted_mode)
{
	return true;
}
168
/*
 * Try to map the layout of virtual screens to the range of the input device.
 * Return true if we need to re-set the crtc modes due to screen offset
 * changes.
 */
static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
{
	struct drm_crtc *crtci;
	struct drm_connector *connectori;
	struct drm_framebuffer *fb1 = NULL;
	bool single_framebuffer = true;
	bool old_single_framebuffer = vbox->single_framebuffer;
	u16 width = 0, height = 0;

	/*
	 * Are we using an X.Org-style single large frame-buffer for all crtcs?
	 * If so then screen layout can be deduced from the crtc offsets.
	 * Same fall-back if this is the fbdev frame-buffer.
	 */
	list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) {
		if (!fb1) {
			fb1 = CRTC_FB(crtci);
			/* fbdev fb is shared by definition; treat as single. */
			if (to_vbox_framebuffer(fb1) == &vbox->fbdev->afb)
				break;
		} else if (CRTC_FB(crtci) && fb1 != CRTC_FB(crtci)) {
			single_framebuffer = false;
		}
	}
	if (single_framebuffer) {
		/* Input range is the size of the shared frame-buffer. */
		list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
				    head) {
			if (to_vbox_crtc(crtci)->crtc_id != 0)
				continue;

			vbox->single_framebuffer = true;
			vbox->input_mapping_width = CRTC_FB(crtci)->width;
			vbox->input_mapping_height = CRTC_FB(crtci)->height;
			return old_single_framebuffer !=
			       vbox->single_framebuffer;
		}
	}
	/* Otherwise calculate the total span of all screens. */
	list_for_each_entry(connectori, &vbox->dev->mode_config.connector_list,
			    head) {
		struct vbox_connector *vbox_connector =
		    to_vbox_connector(connectori);
		struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc;

		width = max_t(u16, width, vbox_crtc->x_hint +
					  vbox_connector->mode_hint.width);
		height = max_t(u16, height, vbox_crtc->y_hint +
					    vbox_connector->mode_hint.height);
	}

	vbox->single_framebuffer = false;
	vbox->input_mapping_width = width;
	vbox->input_mapping_height = height;

	return old_single_framebuffer != vbox->single_framebuffer;
}
229
/*
 * Pin the crtc's current frame-buffer into VRAM (unpinning the old one) and
 * tell the host about the resulting screen layout.
 *
 * Returns 0 on success or a negative errno from TTM reserve/pin.
 */
static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
				 struct drm_framebuffer *old_fb, int x, int y)
{
	struct vbox_private *vbox = crtc->dev->dev_private;
	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
	struct drm_gem_object *obj;
	struct vbox_framebuffer *vbox_fb;
	struct vbox_bo *bo;
	int ret;
	u64 gpu_addr;

	/* Unpin the previous fb. */
	if (old_fb) {
		vbox_fb = to_vbox_framebuffer(old_fb);
		obj = vbox_fb->obj;
		bo = gem_to_vbox_bo(obj);
		ret = vbox_bo_reserve(bo, false);
		if (ret)
			return ret;

		vbox_bo_unpin(bo);
		vbox_bo_unreserve(bo);
	}

	vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc));
	obj = vbox_fb->obj;
	bo = gem_to_vbox_bo(obj);

	ret = vbox_bo_reserve(bo, false);
	if (ret)
		return ret;

	ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
	if (ret) {
		vbox_bo_unreserve(bo);
		return ret;
	}

	/* Keep the fbdev emulation's view of the scanout base in sync. */
	if (&vbox->fbdev->afb == vbox_fb)
		vbox_fbdev_set_base(vbox, gpu_addr);
	vbox_bo_unreserve(bo);

	/* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */
	vbox_crtc->fb_offset = gpu_addr;
	if (vbox_set_up_input_mapping(vbox)) {
		struct drm_crtc *crtci;

		/*
		 * NOTE(review): this passes the outer @crtc to vbox_set_view()
		 * on every iteration while modesetting @crtci — looks like it
		 * was meant to be vbox_set_view(crtci); confirm before fixing.
		 */
		list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
				    head) {
			vbox_set_view(crtc);
			vbox_do_modeset(crtci, &crtci->mode);
		}
	}

	return 0;
}
286
/* Helper-callback-shaped wrapper around vbox_crtc_do_set_base(). */
static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
				   struct drm_framebuffer *old_fb)
{
	return vbox_crtc_do_set_base(crtc, old_fb, x, y);
}
292
293static int vbox_crtc_mode_set(struct drm_crtc *crtc,
294 struct drm_display_mode *mode,
295 struct drm_display_mode *adjusted_mode,
296 int x, int y, struct drm_framebuffer *old_fb)
297{
298 struct vbox_private *vbox = crtc->dev->dev_private;
299 int ret;
300
301 vbox_crtc_mode_set_base(crtc, x, y, old_fb);
302
303 mutex_lock(&vbox->hw_mutex);
304 ret = vbox_set_view(crtc);
305 if (!ret)
306 vbox_do_modeset(crtc, mode);
307 hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
308 vbox->input_mapping_width,
309 vbox->input_mapping_height);
310 mutex_unlock(&vbox->hw_mutex);
311
312 return ret;
313}
314
/* Nothing to do on disable; blanking is handled through the DPMS hook. */
static void vbox_crtc_disable(struct drm_crtc *crtc)
{
}
318
/* No preparation needed before a mode set on the virtual device. */
static void vbox_crtc_prepare(struct drm_crtc *crtc)
{
}
322
/* No commit step needed after a mode set on the virtual device. */
static void vbox_crtc_commit(struct drm_crtc *crtc)
{
}
326
/* Legacy (non-atomic) CRTC helper callbacks. */
static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
	.dpms = vbox_crtc_dpms,
	.mode_fixup = vbox_crtc_mode_fixup,
	.mode_set = vbox_crtc_mode_set,
	/* .mode_set_base = vbox_crtc_mode_set_base, */
	.disable = vbox_crtc_disable,
	.load_lut = vbox_crtc_load_lut,
	.prepare = vbox_crtc_prepare,
	.commit = vbox_crtc_commit,
};
337
/* No hardware state to reset on the virtual device. */
static void vbox_crtc_reset(struct drm_crtc *crtc)
{
}
341
/* Tear down and free a CRTC allocated by vbox_crtc_init(). */
static void vbox_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
	/* crtc is the first member of struct vbox_crtc, so this frees it. */
	kfree(crtc);
}
347
/* Core CRTC callbacks (cursor handling and legacy set_config). */
static const struct drm_crtc_funcs vbox_crtc_funcs = {
	.cursor_move = vbox_cursor_move,
	.cursor_set2 = vbox_cursor_set2,
	.reset = vbox_crtc_reset,
	.set_config = drm_crtc_helper_set_config,
	/* .gamma_set = vbox_crtc_gamma_set, */
	.destroy = vbox_crtc_destroy,
};
356
357static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
358{
359 struct vbox_crtc *vbox_crtc;
360
361 vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL);
362 if (!vbox_crtc)
363 return NULL;
364
365 vbox_crtc->crtc_id = i;
366
367 drm_crtc_init(dev, &vbox_crtc->base, &vbox_crtc_funcs);
368 drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
369 drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
370
371 return vbox_crtc;
372}
373
/* Tear down and free an encoder allocated by vbox_encoder_init(). */
static void vbox_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	/* encoder is the first member of struct vbox_encoder; frees it all. */
	kfree(encoder);
}
379
380static struct drm_encoder *vbox_best_single_encoder(struct drm_connector
381 *connector)
382{
383 int enc_id = connector->encoder_ids[0];
384
385 /* pick the encoder ids */
386 if (enc_id)
387 return drm_encoder_find(connector->dev, enc_id);
388
389 return NULL;
390}
391
/* Core encoder callbacks. */
static const struct drm_encoder_funcs vbox_enc_funcs = {
	.destroy = vbox_encoder_destroy,
};
395
/* DPMS is handled at the CRTC level; nothing to do per encoder. */
static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}
399
/* No encoder-level mode adjustment needed; accept every mode. */
static bool vbox_mode_fixup(struct drm_encoder *encoder,
			    const struct drm_display_mode *mode,
			    struct drm_display_mode *adjusted_mode)
{
	return true;
}
406
/* Mode programming happens in the CRTC hooks; nothing to do here. */
static void vbox_encoder_mode_set(struct drm_encoder *encoder,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
}
412
/* No per-encoder preparation needed before a mode set. */
static void vbox_encoder_prepare(struct drm_encoder *encoder)
{
}
416
/* No per-encoder commit step needed after a mode set. */
static void vbox_encoder_commit(struct drm_encoder *encoder)
{
}
420
/* Legacy (non-atomic) encoder helper callbacks; all are no-ops here. */
static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = {
	.dpms = vbox_encoder_dpms,
	.mode_fixup = vbox_mode_fixup,
	.prepare = vbox_encoder_prepare,
	.commit = vbox_encoder_commit,
	.mode_set = vbox_encoder_mode_set,
};
428
429static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
430 unsigned int i)
431{
432 struct vbox_encoder *vbox_encoder;
433
434 vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL);
435 if (!vbox_encoder)
436 return NULL;
437
438 drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
439 DRM_MODE_ENCODER_DAC, NULL);
440 drm_encoder_helper_add(&vbox_encoder->base, &vbox_enc_helper_funcs);
441
442 vbox_encoder->base.possible_crtcs = 1 << i;
443 return &vbox_encoder->base;
444}
445
/**
 * Generate EDID data with a mode-unique serial number for the virtual
 * monitor to try to persuade Unity that different modes correspond to
 * different monitors and it should not try to force the same resolution on
 * them.
 */
static void vbox_set_edid(struct drm_connector *connector, int width,
			  int height)
{
	enum { EDID_SIZE = 128 };
	/* Template EDID 1.3 block; the marked fields are patched below. */
	unsigned char edid[EDID_SIZE] = {
		0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */
		0x58, 0x58,		/* manufacturer (VBX) */
		0x00, 0x00,		/* product code */
		0x00, 0x00, 0x00, 0x00,	/* serial number goes here */
		0x01,			/* week of manufacture */
		0x00,			/* year of manufacture */
		0x01, 0x03,		/* EDID version */
		0x80,			/* capabilities - digital */
		0x00,			/* horiz. res in cm, zero for projectors */
		0x00,			/* vert. res in cm */
		0x78,			/* display gamma (120 == 2.2). */
		0xEE,			/* features (standby, suspend, off, RGB, std */
					/* colour space, preferred timing mode) */
		0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
		/* chromaticity for standard colour space. */
		0x00, 0x00, 0x00,	/* no default timings */
		0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
		0x01, 0x01,
		0x01, 0x01, 0x01, 0x01,	/* no standard timings */
		0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02,
		0x02, 0x02,
		/* descriptor block 1 goes below */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/* descriptor block 2, monitor ranges */
		0x00, 0x00, 0x00, 0xFD, 0x00,
		0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20,
		0x20, 0x20,
		/* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */
		0x20,
		/* descriptor block 3, monitor name */
		0x00, 0x00, 0x00, 0xFC, 0x00,
		'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r',
		'\n',
		/* descriptor block 4: dummy data */
		0x00, 0x00, 0x00, 0x10, 0x00,
		0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20,
		0x00,			/* number of extensions */
		0x00			/* checksum goes here */
	};
	/* Plausible pixel clock for the size, in EDID units of 10 kHz. */
	int clock = (width + 6) * (height + 6) * 60 / 10000;
	unsigned int i, sum = 0;

	/* Use the geometry as the "serial number" so each mode is unique. */
	edid[12] = width & 0xff;
	edid[13] = width >> 8;
	edid[14] = height & 0xff;
	edid[15] = height >> 8;
	/* Patch the detailed timing descriptor with the preferred mode. */
	edid[54] = clock & 0xff;
	edid[55] = clock >> 8;
	edid[56] = width & 0xff;
	edid[58] = (width >> 4) & 0xf0;
	edid[59] = height & 0xff;
	edid[61] = (height >> 4) & 0xf0;
	/* Recompute the block checksum: all 128 bytes must sum to 0 mod 256. */
	for (i = 0; i < EDID_SIZE - 1; ++i)
		sum += edid[i];
	edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
	drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
}
516
/*
 * Connector .get_modes hook: report the modes we offer for this screen and
 * refresh the EDID / suggested-position properties.
 *
 * Returns the number of modes added to the connector's probed list.
 */
static int vbox_get_modes(struct drm_connector *connector)
{
	struct vbox_connector *vbox_connector = NULL;
	struct drm_display_mode *mode = NULL;
	struct vbox_private *vbox = NULL;
	unsigned int num_modes = 0;
	int preferred_width, preferred_height;

	vbox_connector = to_vbox_connector(connector);
	vbox = connector->dev->dev_private;
	/*
	 * Heuristic: we do not want to tell the host that we support dynamic
	 * resizing unless we feel confident that the user space client using
	 * the video driver can handle hot-plug events. So the first time modes
	 * are queried after a "master" switch we tell the host that we do not,
	 * and immediately after we send the client a hot-plug notification as
	 * a test to see if they will respond and query again.
	 * That is also the reason why capabilities are reported to the host at
	 * this place in the code rather than elsewhere.
	 * We need to report the flags location before reporting the IRQ
	 * capability.
	 */
	hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
				    HOST_FLAGS_OFFSET);
	if (vbox_connector->vbox_crtc->crtc_id == 0)
		vbox_report_caps(vbox);
	if (!vbox->initial_mode_queried) {
		/* First query after master switch: see heuristic above. */
		if (vbox_connector->vbox_crtc->crtc_id == 0) {
			vbox->initial_mode_queried = true;
			vbox_report_hotplug(vbox);
		}
		return drm_add_modes_noedid(connector, 800, 600);
	}
	num_modes = drm_add_modes_noedid(connector, 2560, 1600);
	/* Fall back to 1024x768 if the host has not sent a size hint. */
	preferred_width = vbox_connector->mode_hint.width ?
			  vbox_connector->mode_hint.width : 1024;
	preferred_height = vbox_connector->mode_hint.height ?
			   vbox_connector->mode_hint.height : 768;
	mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height,
			    60, false, false, false);
	if (mode) {
		mode->type |= DRM_MODE_TYPE_PREFERRED;
		drm_mode_probed_add(connector, mode);
		++num_modes;
	}
	vbox_set_edid(connector, preferred_width, preferred_height);
	/* Pass the host's screen position hints on to user space. */
	drm_object_property_set_value(
		&connector->base, vbox->dev->mode_config.suggested_x_property,
		vbox_connector->vbox_crtc->x_hint);
	drm_object_property_set_value(
		&connector->base, vbox->dev->mode_config.suggested_y_property,
		vbox_connector->vbox_crtc->y_hint);

	return num_modes;
}
572
/* The virtual device can scan out any mode; accept them all. */
static int vbox_mode_valid(struct drm_connector *connector,
			   struct drm_display_mode *mode)
{
	return MODE_OK;
}
578
/* Unregister, clean up and free a connector from vbox_connector_init(). */
static void vbox_connector_destroy(struct drm_connector *connector)
{
	/*
	 * The previous to_vbox_connector() local was never used;
	 * connector is the first member of struct vbox_connector, so
	 * kfree(connector) releases the whole structure.
	 */
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);
	kfree(connector);
}
588
589static enum drm_connector_status
590vbox_connector_detect(struct drm_connector *connector, bool force)
591{
592 struct vbox_connector *vbox_connector;
593
594 vbox_connector = to_vbox_connector(connector);
595
596 return vbox_connector->mode_hint.disconnected ?
597 connector_status_disconnected : connector_status_connected;
598}
599
/*
 * Connector .fill_modes hook: drop every previously-probed mode so stale
 * host hints disappear, then re-probe from scratch.
 *
 * Returns the number of modes found.
 */
static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
			   u32 max_y)
{
	struct vbox_connector *vbox_connector;
	struct drm_device *dev;
	struct drm_display_mode *mode, *iterator;

	vbox_connector = to_vbox_connector(connector);
	dev = vbox_connector->base.dev;
	/* _safe variant: entries are deleted while walking the list. */
	list_for_each_entry_safe(mode, iterator, &connector->modes, head) {
		list_del(&mode->head);
		drm_mode_destroy(dev, mode);
	}

	return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
}
616
/* Connector probe helper callbacks. */
static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
	.mode_valid = vbox_mode_valid,
	.get_modes = vbox_get_modes,
	.best_encoder = vbox_best_single_encoder,
};
622
/* Core connector callbacks. */
static const struct drm_connector_funcs vbox_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = vbox_connector_detect,
	.fill_modes = vbox_fill_modes,
	.destroy = vbox_connector_destroy,
};
629
/*
 * Allocate and register the VGA connector for @vbox_crtc and attach it to
 * @encoder.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int vbox_connector_init(struct drm_device *dev,
			       struct vbox_crtc *vbox_crtc,
			       struct drm_encoder *encoder)
{
	struct vbox_connector *vbox_connector;
	struct drm_connector *connector;

	vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL);
	if (!vbox_connector)
		return -ENOMEM;

	connector = &vbox_connector->base;
	vbox_connector->vbox_crtc = vbox_crtc;

	drm_connector_init(dev, connector, &vbox_connector_funcs,
			   DRM_MODE_CONNECTOR_VGA);
	drm_connector_helper_add(connector, &vbox_connector_helper_funcs);

	connector->interlace_allowed = 0;
	connector->doublescan_allowed = 0;

	/*
	 * NOTE(review): called once per connector; presumably safe to invoke
	 * repeatedly (later calls should find the properties already exist) —
	 * confirm against the DRM core implementation.
	 */
	drm_mode_create_suggested_offset_properties(dev);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_x_property, -1);
	drm_object_attach_property(&connector->base,
				   dev->mode_config.suggested_y_property, -1);
	drm_connector_register(connector);

	drm_mode_connector_attach_encoder(connector, encoder);

	return 0;
}
662
/*
 * Create one CRTC, encoder and connector per host screen.
 *
 * Returns 0 on success or a negative errno.  Objects created before a
 * failure are not torn down here; presumably the mode-config cleanup on the
 * error path reclaims them via their destroy hooks — TODO confirm against
 * the caller's unwind path.
 */
int vbox_mode_init(struct drm_device *dev)
{
	struct vbox_private *vbox = dev->dev_private;
	struct drm_encoder *encoder;
	struct vbox_crtc *vbox_crtc;
	unsigned int i;
	int ret;

	/* vbox_cursor_init(dev); */
	for (i = 0; i < vbox->num_crtcs; ++i) {
		vbox_crtc = vbox_crtc_init(dev, i);
		if (!vbox_crtc)
			return -ENOMEM;
		encoder = vbox_encoder_init(dev, i);
		if (!encoder)
			return -ENOMEM;
		ret = vbox_connector_init(dev, vbox_crtc, encoder);
		if (ret)
			return ret;
	}

	return 0;
}
686
/* Mode-setting teardown; currently nothing beyond the commented-out cursor. */
void vbox_mode_fini(struct drm_device *dev)
{
	/* vbox_cursor_fini(dev); */
}
691
692/**
693 * Copy the ARGB image and generate the mask, which is needed in case the host
694 * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set
695 * if the corresponding alpha value in the ARGB image is greater than 0xF0.
696 */
697static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
698 size_t mask_size)
699{
700 size_t line_size = (width + 7) / 8;
701 u32 i, j;
702
703 memcpy(dst + mask_size, src, width * height * 4);
704 for (i = 0; i < height; ++i)
705 for (j = 0; j < width; ++j)
706 if (((u32 *)src)[i * width + j] > 0xf0000000)
707 dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
708}
709
/*
 * CRTC .cursor_set2 hook: upload a new ARGB cursor shape (plus generated
 * 1BPP mask) to the host, or hide the cursor when @handle is zero.
 *
 * Returns 0 on success or a negative errno.
 */
static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
			    u32 handle, u32 width, u32 height,
			    s32 hot_x, s32 hot_y)
{
	struct vbox_private *vbox = crtc->dev->dev_private;
	struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
	struct ttm_bo_kmap_obj uobj_map;
	size_t data_size, mask_size;
	struct drm_gem_object *obj;
	u32 flags, caps = 0;
	struct vbox_bo *bo;
	bool src_isiomem;
	u8 *dst = NULL;
	u8 *src;
	int ret;

	/*
	 * Re-set this regularly as in 5.0.20 and earlier the information was
	 * lost on save and restore.
	 */
	hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
				   vbox->input_mapping_width,
				   vbox->input_mapping_height);
	if (!handle) {
		bool cursor_enabled = false;
		struct drm_crtc *crtci;

		/* Hide cursor. */
		vbox_crtc->cursor_enabled = false;
		/* Only tell the host once every crtc's cursor is off. */
		list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list,
				    head) {
			if (to_vbox_crtc(crtci)->cursor_enabled)
				cursor_enabled = true;
		}

		if (!cursor_enabled)
			hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
						   0, 0, NULL, 0);
		return 0;
	}

	vbox_crtc->cursor_enabled = true;

	if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
	    width == 0 || height == 0)
		return -EINVAL;

	ret = hgsmi_query_conf(vbox->guest_pool,
			       VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
	if (ret)
		return ret;

	if (!(caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
		/*
		 * -EINVAL means cursor_set2() not supported, -EAGAIN means
		 * retry at once.
		 */
		return -EBUSY;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
		return -ENOENT;
	}

	bo = gem_to_vbox_bo(obj);
	ret = vbox_bo_reserve(bo, false);
	if (ret)
		goto out_unref_obj;

	/*
	 * The mask must be calculated based on the alpha
	 * channel, one bit per ARGB word, and must be 32-bit
	 * padded.
	 */
	mask_size = ((width + 7) / 8 * height + 3) & ~3;
	data_size = width * height * 4 + mask_size;
	/* Clamp the hot-spot into the cursor rectangle. */
	vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width);
	vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height);
	vbox->cursor_width = width;
	vbox->cursor_height = height;
	vbox->cursor_data_size = data_size;
	dst = vbox->cursor_data;

	ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
	if (ret) {
		/* Mark the cached shape invalid for vbox_cursor_move(). */
		vbox->cursor_data_size = 0;
		goto out_unreserve_bo;
	}

	src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
	if (src_isiomem) {
		DRM_ERROR("src cursor bo not in main memory\n");
		ret = -EIO;
		goto out_unmap_bo;
	}

	copy_cursor_image(src, dst, width, height, mask_size);

	flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
		VBOX_MOUSE_POINTER_ALPHA;
	ret = hgsmi_update_pointer_shape(vbox->guest_pool, flags,
					 vbox->cursor_hot_x, vbox->cursor_hot_y,
					 width, height, dst, data_size);
	/* goto-chain cleanup: unwind in reverse acquisition order. */
out_unmap_bo:
	ttm_bo_kunmap(&uobj_map);
out_unreserve_bo:
	vbox_bo_unreserve(bo);
out_unref_obj:
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
824
/*
 * CRTC .cursor_move hook: report the new position to the host and, for old
 * clients that set the hot-spot via DRM_MODE_CURSOR_MOVE, re-upload the
 * cached shape with the deduced hot-spot.
 *
 * Returns 0 on success or a negative errno from the HGSMI calls.
 */
static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vbox_private *vbox = crtc->dev->dev_private;
	u32 flags = VBOX_MOUSE_POINTER_VISIBLE |
	    VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA;
	/* Screen offset inside the virtual desktop (see input mapping). */
	s32 crtc_x =
	    vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint;
	s32 crtc_y =
	    vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint;
	u32 host_x, host_y;
	u32 hot_x = 0;
	u32 hot_y = 0;
	int ret;

	/*
	 * We compare these to unsigned later and don't
	 * need to handle negative.
	 */
	if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0)
		return 0;

	ret = hgsmi_cursor_position(vbox->guest_pool, true, x + crtc_x,
				    y + crtc_y, &host_x, &host_y);

	/*
	 * The only reason we have vbox_cursor_move() is that some older clients
	 * might use DRM_IOCTL_MODE_CURSOR instead of DRM_IOCTL_MODE_CURSOR2 and
	 * use DRM_MODE_CURSOR_MOVE to set the hot-spot.
	 *
	 * However VirtualBox 5.0.20 and earlier has a bug causing it to return
	 * 0,0 as host cursor location after a save and restore.
	 *
	 * To work around this we ignore a 0, 0 return, since missing the odd
	 * time when it legitimately happens is not going to hurt much.
	 */
	if (ret || (host_x == 0 && host_y == 0))
		return ret;

	/* Deduce the hot-spot from guest vs. host cursor positions. */
	if (x + crtc_x < host_x)
		hot_x = min(host_x - x - crtc_x, vbox->cursor_width);
	if (y + crtc_y < host_y)
		hot_y = min(host_y - y - crtc_y, vbox->cursor_height);

	/* Avoid a shape re-upload when the hot-spot has not changed. */
	if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y)
		return 0;

	vbox->cursor_hot_x = hot_x;
	vbox->cursor_hot_y = hot_y;

	return hgsmi_update_pointer_shape(vbox->guest_pool, flags,
		hot_x, hot_y, vbox->cursor_width, vbox->cursor_height,
		vbox->cursor_data, vbox->cursor_data_size);
}
diff --git a/drivers/staging/vboxvideo/vbox_prime.c b/drivers/staging/vboxvideo/vbox_prime.c
new file mode 100644
index 000000000000..b7453e427a1d
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_prime.c
@@ -0,0 +1,74 @@
1/*
2 * Copyright (C) 2017 Oracle Corporation
3 * Copyright 2017 Canonical
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Andreas Pokorny
24 */
25
26#include "vbox_drv.h"
27
28/*
29 * Based on qxl_prime.c:
30 * Empty Implementations as there should not be any other driver for a virtual
31 * device that might share buffers with vboxvideo
32 */
33
/* PRIME pin: intentionally unimplemented (no buffer sharing partners). */
int vbox_gem_prime_pin(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return -ENOSYS;
}
39
/* PRIME unpin: intentionally unimplemented (no buffer sharing partners). */
void vbox_gem_prime_unpin(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
}
44
/* PRIME export: intentionally unimplemented (no buffer sharing partners). */
struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
50
/* PRIME import: intentionally unimplemented (no buffer sharing partners). */
struct drm_gem_object *vbox_gem_prime_import_sg_table(
	struct drm_device *dev, struct dma_buf_attachment *attach,
	struct sg_table *table)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
58
/* PRIME vmap: intentionally unimplemented (no buffer sharing partners). */
void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
{
	WARN_ONCE(1, "not implemented");
	return ERR_PTR(-ENOSYS);
}
64
/* PRIME vunmap: intentionally unimplemented (no buffer sharing partners). */
void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	WARN_ONCE(1, "not implemented");
}
69
/* PRIME mmap: intentionally unimplemented (no buffer sharing partners). */
int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
{
	WARN_ONCE(1, "not implemented");
	return -ENOSYS;
}
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
new file mode 100644
index 000000000000..34a905d40735
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -0,0 +1,472 @@
1/*
2 * Copyright (C) 2013-2017 Oracle Corporation
3 * This file is based on ast_ttm.c
4 * Copyright 2012 Red Hat Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
18 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
19 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
20 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * The above copyright notice and this permission notice (including the
23 * next paragraph) shall be included in all copies or substantial portions
24 * of the Software.
25 *
26 *
27 * Authors: Dave Airlie <airlied@redhat.com>
28 * Michael Thayer <michael.thayer@oracle.com>
29 */
30#include "vbox_drv.h"
31#include <ttm/ttm_page_alloc.h>
32
33static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
34{
35 return container_of(bd, struct vbox_private, ttm.bdev);
36}
37
38static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
39{
40 return ttm_mem_global_init(ref->object);
41}
42
43static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
44{
45 ttm_mem_global_release(ref->object);
46}
47
48/**
49 * Adds the vbox memory manager object/structures to the global memory manager.
50 */
51static int vbox_ttm_global_init(struct vbox_private *vbox)
52{
53 struct drm_global_reference *global_ref;
54 int ret;
55
56 global_ref = &vbox->ttm.mem_global_ref;
57 global_ref->global_type = DRM_GLOBAL_TTM_MEM;
58 global_ref->size = sizeof(struct ttm_mem_global);
59 global_ref->init = &vbox_ttm_mem_global_init;
60 global_ref->release = &vbox_ttm_mem_global_release;
61 ret = drm_global_item_ref(global_ref);
62 if (ret) {
63 DRM_ERROR("Failed setting up TTM memory subsystem.\n");
64 return ret;
65 }
66
67 vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
68 global_ref = &vbox->ttm.bo_global_ref.ref;
69 global_ref->global_type = DRM_GLOBAL_TTM_BO;
70 global_ref->size = sizeof(struct ttm_bo_global);
71 global_ref->init = &ttm_bo_global_init;
72 global_ref->release = &ttm_bo_global_release;
73
74 ret = drm_global_item_ref(global_ref);
75 if (ret) {
76 DRM_ERROR("Failed setting up TTM BO subsystem.\n");
77 drm_global_item_unref(&vbox->ttm.mem_global_ref);
78 return ret;
79 }
80
81 return 0;
82}
83
84/**
85 * Removes the vbox memory manager object from the global memory manager.
86 */
87static void vbox_ttm_global_release(struct vbox_private *vbox)
88{
89 drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
90 drm_global_item_unref(&vbox->ttm.mem_global_ref);
91}
92
93static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
94{
95 struct vbox_bo *bo;
96
97 bo = container_of(tbo, struct vbox_bo, bo);
98
99 drm_gem_object_release(&bo->gem);
100 kfree(bo);
101}
102
103static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
104{
105 if (bo->destroy == &vbox_bo_ttm_destroy)
106 return true;
107
108 return false;
109}
110
111static int
112vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
113 struct ttm_mem_type_manager *man)
114{
115 switch (type) {
116 case TTM_PL_SYSTEM:
117 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
118 man->available_caching = TTM_PL_MASK_CACHING;
119 man->default_caching = TTM_PL_FLAG_CACHED;
120 break;
121 case TTM_PL_VRAM:
122 man->func = &ttm_bo_manager_func;
123 man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
124 man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
125 man->default_caching = TTM_PL_FLAG_WC;
126 break;
127 default:
128 DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
129 return -EINVAL;
130 }
131
132 return 0;
133}
134
135static void
136vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
137{
138 struct vbox_bo *vboxbo = vbox_bo(bo);
139
140 if (!vbox_ttm_bo_is_vbox_bo(bo))
141 return;
142
143 vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
144 *pl = vboxbo->placement;
145}
146
/* Access verification hook: everyone may map; no per-file checks. */
static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
				 struct file *filp)
{
	return 0;
}
152
153static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
154 struct ttm_mem_reg *mem)
155{
156 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
157 struct vbox_private *vbox = vbox_bdev(bdev);
158
159 mem->bus.addr = NULL;
160 mem->bus.offset = 0;
161 mem->bus.size = mem->num_pages << PAGE_SHIFT;
162 mem->bus.base = 0;
163 mem->bus.is_iomem = false;
164 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
165 return -EINVAL;
166 switch (mem->mem_type) {
167 case TTM_PL_SYSTEM:
168 /* system memory */
169 return 0;
170 case TTM_PL_VRAM:
171 mem->bus.offset = mem->start << PAGE_SHIFT;
172 mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
173 mem->bus.is_iomem = true;
174 break;
175 default:
176 return -EINVAL;
177 }
178 return 0;
179}
180
/* Nothing was reserved in vbox_ttm_io_mem_reserve(), so nothing to free. */
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
				 struct ttm_mem_reg *mem)
{
}
185
186static int vbox_bo_move(struct ttm_buffer_object *bo,
187 bool evict, bool interruptible,
188 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
189{
190 return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
191}
192
/* Destroy a ttm_tt allocated by vbox_ttm_tt_create(). */
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}
198
199static struct ttm_backend_func vbox_tt_backend_func = {
200 .destroy = &vbox_ttm_backend_destroy,
201};
202
203static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
204 unsigned long size,
205 u32 page_flags,
206 struct page *dummy_read_page)
207{
208 struct ttm_tt *tt;
209
210 tt = kzalloc(sizeof(*tt), GFP_KERNEL);
211 if (!tt)
212 return NULL;
213
214 tt->func = &vbox_tt_backend_func;
215 if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
216 kfree(tt);
217 return NULL;
218 }
219
220 return tt;
221}
222
/* Back a ttm_tt with pages from the shared TTM page pool. */
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}
227
/* Return a ttm_tt's pages to the shared TTM page pool. */
static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}
232
233struct ttm_bo_driver vbox_bo_driver = {
234 .ttm_tt_create = vbox_ttm_tt_create,
235 .ttm_tt_populate = vbox_ttm_tt_populate,
236 .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
237 .init_mem_type = vbox_bo_init_mem_type,
238 .eviction_valuable = ttm_bo_eviction_valuable,
239 .evict_flags = vbox_bo_evict_flags,
240 .move = vbox_bo_move,
241 .verify_access = vbox_bo_verify_access,
242 .io_mem_reserve = &vbox_ttm_io_mem_reserve,
243 .io_mem_free = &vbox_ttm_io_mem_free,
244 .io_mem_pfn = ttm_bo_default_io_mem_pfn,
245};
246
247int vbox_mm_init(struct vbox_private *vbox)
248{
249 int ret;
250 struct drm_device *dev = vbox->dev;
251 struct ttm_bo_device *bdev = &vbox->ttm.bdev;
252
253 ret = vbox_ttm_global_init(vbox);
254 if (ret)
255 return ret;
256
257 ret = ttm_bo_device_init(&vbox->ttm.bdev,
258 vbox->ttm.bo_global_ref.ref.object,
259 &vbox_bo_driver,
260 dev->anon_inode->i_mapping,
261 DRM_FILE_PAGE_OFFSET, true);
262 if (ret) {
263 DRM_ERROR("Error initialising bo driver; %d\n", ret);
264 goto err_ttm_global_release;
265 }
266
267 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
268 vbox->available_vram_size >> PAGE_SHIFT);
269 if (ret) {
270 DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
271 goto err_device_release;
272 }
273
274#ifdef DRM_MTRR_WC
275 vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
276 pci_resource_len(dev->pdev, 0),
277 DRM_MTRR_WC);
278#else
279 vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
280 pci_resource_len(dev->pdev, 0));
281#endif
282 return 0;
283
284err_device_release:
285 ttm_bo_device_release(&vbox->ttm.bdev);
286err_ttm_global_release:
287 vbox_ttm_global_release(vbox);
288 return ret;
289}
290
291void vbox_mm_fini(struct vbox_private *vbox)
292{
293#ifdef DRM_MTRR_WC
294 drm_mtrr_del(vbox->fb_mtrr,
295 pci_resource_start(vbox->dev->pdev, 0),
296 pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC);
297#else
298 arch_phys_wc_del(vbox->fb_mtrr);
299#endif
300 ttm_bo_device_release(&vbox->ttm.bdev);
301 vbox_ttm_global_release(vbox);
302}
303
304void vbox_ttm_placement(struct vbox_bo *bo, int domain)
305{
306 unsigned int i;
307 u32 c = 0;
308
309 bo->placement.placement = bo->placements;
310 bo->placement.busy_placement = bo->placements;
311
312 if (domain & TTM_PL_FLAG_VRAM)
313 bo->placements[c++].flags =
314 TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
315 if (domain & TTM_PL_FLAG_SYSTEM)
316 bo->placements[c++].flags =
317 TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
318 if (!c)
319 bo->placements[c++].flags =
320 TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
321
322 bo->placement.num_placement = c;
323 bo->placement.num_busy_placement = c;
324
325 for (i = 0; i < c; ++i) {
326 bo->placements[i].fpfn = 0;
327 bo->placements[i].lpfn = 0;
328 }
329}
330
331int vbox_bo_create(struct drm_device *dev, int size, int align,
332 u32 flags, struct vbox_bo **pvboxbo)
333{
334 struct vbox_private *vbox = dev->dev_private;
335 struct vbox_bo *vboxbo;
336 size_t acc_size;
337 int ret;
338
339 vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
340 if (!vboxbo)
341 return -ENOMEM;
342
343 ret = drm_gem_object_init(dev, &vboxbo->gem, size);
344 if (ret)
345 goto err_free_vboxbo;
346
347 vboxbo->bo.bdev = &vbox->ttm.bdev;
348
349 vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
350
351 acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
352 sizeof(struct vbox_bo));
353
354 ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
355 ttm_bo_type_device, &vboxbo->placement,
356 align >> PAGE_SHIFT, false, NULL, acc_size,
357 NULL, NULL, vbox_bo_ttm_destroy);
358 if (ret)
359 goto err_free_vboxbo;
360
361 *pvboxbo = vboxbo;
362
363 return 0;
364
365err_free_vboxbo:
366 kfree(vboxbo);
367 return ret;
368}
369
370static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
371{
372 return bo->bo.offset;
373}
374
375int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
376{
377 int i, ret;
378
379 if (bo->pin_count) {
380 bo->pin_count++;
381 if (gpu_addr)
382 *gpu_addr = vbox_bo_gpu_offset(bo);
383
384 return 0;
385 }
386
387 vbox_ttm_placement(bo, pl_flag);
388
389 for (i = 0; i < bo->placement.num_placement; i++)
390 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
391
392 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
393 if (ret)
394 return ret;
395
396 bo->pin_count = 1;
397
398 if (gpu_addr)
399 *gpu_addr = vbox_bo_gpu_offset(bo);
400
401 return 0;
402}
403
404int vbox_bo_unpin(struct vbox_bo *bo)
405{
406 int i, ret;
407
408 if (!bo->pin_count) {
409 DRM_ERROR("unpin bad %p\n", bo);
410 return 0;
411 }
412 bo->pin_count--;
413 if (bo->pin_count)
414 return 0;
415
416 for (i = 0; i < bo->placement.num_placement; i++)
417 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
418
419 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
420 if (ret)
421 return ret;
422
423 return 0;
424}
425
426/*
427 * Move a vbox-owned buffer object to system memory if no one else has it
428 * pinned. The caller must have pinned it previously, and this call will
429 * release the caller's pin.
430 */
431int vbox_bo_push_sysram(struct vbox_bo *bo)
432{
433 int i, ret;
434
435 if (!bo->pin_count) {
436 DRM_ERROR("unpin bad %p\n", bo);
437 return 0;
438 }
439 bo->pin_count--;
440 if (bo->pin_count)
441 return 0;
442
443 if (bo->kmap.virtual)
444 ttm_bo_kunmap(&bo->kmap);
445
446 vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
447
448 for (i = 0; i < bo->placement.num_placement; i++)
449 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
450
451 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
452 if (ret) {
453 DRM_ERROR("pushing to VRAM failed\n");
454 return ret;
455 }
456
457 return 0;
458}
459
460int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
461{
462 struct drm_file *file_priv;
463 struct vbox_private *vbox;
464
465 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
466 return -EINVAL;
467
468 file_priv = filp->private_data;
469 vbox = file_priv->minor->dev->dev_private;
470
471 return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
472}
diff --git a/drivers/staging/vboxvideo/vboxvideo.h b/drivers/staging/vboxvideo/vboxvideo.h
new file mode 100644
index 000000000000..d835d75d761c
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo.h
@@ -0,0 +1,491 @@
1/*
2 * Copyright (C) 2006-2016 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
16 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
18 * USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 */
23
24#ifndef __VBOXVIDEO_H__
25#define __VBOXVIDEO_H__
26
27/*
28 * This should be in sync with monitorCount <xsd:maxInclusive value="64"/> in
29 * src/VBox/Main/xml/VirtualBox-settings-common.xsd
30 */
31#define VBOX_VIDEO_MAX_SCREENS 64
32
33/*
34 * The last 4096 bytes of the guest VRAM contains the generic info for all
35 * DualView chunks: sizes and offsets of chunks. This is filled by miniport.
36 *
37 * Last 4096 bytes of each chunk contain chunk specific data: framebuffer info,
38 * etc. This is used exclusively by the corresponding instance of a display
39 * driver.
40 *
41 * The VRAM layout:
42 * Last 4096 bytes - Adapter information area.
 43 * 4096 bytes aligned miniport heap (value specified in the config rounded up).
44 * Slack - what left after dividing the VRAM.
45 * 4096 bytes aligned framebuffers:
46 * last 4096 bytes of each framebuffer is the display information area.
47 *
48 * The Virtual Graphics Adapter information in the guest VRAM is stored by the
49 * guest video driver using structures prepended by VBOXVIDEOINFOHDR.
50 *
51 * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO
52 * the host starts to process the info. The first element at the start of
 53 * the 4096 bytes region should normally be a LINK that points to
54 * actual information chain. That way the guest driver can have some
55 * fixed layout of the information memory block and just rewrite
56 * the link to point to relevant memory chain.
57 *
58 * The processing stops at the END element.
59 *
60 * The host can access the memory only when the port IO is processed.
61 * All data that will be needed later must be copied from these 4096 bytes.
62 * But other VRAM can be used by host until the mode is disabled.
63 *
64 * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO
65 * to disable the mode.
66 *
67 * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
68 * from the host and issue commands to the host.
69 *
 70 * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, then the
71 * following operations with the VBE data register can be performed:
72 *
73 * Operation Result
74 * write 16 bit value NOP
75 * read 16 bit value count of monitors
76 * write 32 bit value set the vbox cmd value and the cmd processed by the host
77 * read 32 bit value result of the last vbox command is returned
78 */
79
80/**
81 * VBVA command header.
82 *
83 * @todo Where does this fit in?
84 */
85struct vbva_cmd_hdr {
86 /** Coordinates of affected rectangle. */
87 s16 x;
88 s16 y;
89 u16 w;
90 u16 h;
91} __packed;
92
93/** @name VBVA ring defines.
94 *
95 * The VBVA ring buffer is suitable for transferring large (< 2GB) amount of
96 * data. For example big bitmaps which do not fit to the buffer.
97 *
98 * Guest starts writing to the buffer by initializing a record entry in the
99 * records queue. VBVA_F_RECORD_PARTIAL indicates that the record is being
100 * written. As data is written to the ring buffer, the guest increases
101 * free_offset.
102 *
103 * The host reads the records on flushes and processes all completed records.
104 * When host encounters situation when only a partial record presents and
105 * len_and_flags & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE -
106 * VBVA_RING_BUFFER_THRESHOLD, the host fetched all record data and updates
107 * data_offset. After that on each flush the host continues fetching the data
108 * until the record is completed.
109 *
110 */
111#define VBVA_RING_BUFFER_SIZE (4194304 - 1024)
112#define VBVA_RING_BUFFER_THRESHOLD (4096)
113
114#define VBVA_MAX_RECORDS (64)
115
116#define VBVA_F_MODE_ENABLED 0x00000001u
117#define VBVA_F_MODE_VRDP 0x00000002u
118#define VBVA_F_MODE_VRDP_RESET 0x00000004u
119#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u
120
121#define VBVA_F_STATE_PROCESSING 0x00010000u
122
123#define VBVA_F_RECORD_PARTIAL 0x80000000u
124
125/**
126 * VBVA record.
127 */
128struct vbva_record {
129 /** The length of the record. Changed by guest. */
130 u32 len_and_flags;
131} __packed;
132
133/*
134 * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of
135 * the runtime heapsimple API. Use minimum 2 pages here, because the info area
136 * also may contain other data (for example hgsmi_host_flags structure).
137 */
138#define VBVA_ADAPTER_INFORMATION_SIZE 65536
139#define VBVA_MIN_BUFFER_SIZE 65536
140
141/* The value for port IO to let the adapter to interpret the adapter memory. */
142#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF
143
144/* The value for port IO to let the adapter to interpret the adapter memory. */
145#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000
146
147/* The value for port IO to let the adapter to interpret the display memory.
148 * The display number is encoded in low 16 bits.
149 */
150#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000
151
152struct vbva_host_flags {
153 u32 host_events;
154 u32 supported_orders;
155} __packed;
156
157struct vbva_buffer {
158 struct vbva_host_flags host_flags;
159
160 /* The offset where the data start in the buffer. */
161 u32 data_offset;
162 /* The offset where next data must be placed in the buffer. */
163 u32 free_offset;
164
165 /* The queue of record descriptions. */
166 struct vbva_record records[VBVA_MAX_RECORDS];
167 u32 record_first_index;
168 u32 record_free_index;
169
170 /* Space to leave free when large partial records are transferred. */
171 u32 partial_write_tresh;
172
173 u32 data_len;
174 /* variable size for the rest of the vbva_buffer area in VRAM. */
175 u8 data[0];
176} __packed;
177
178#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
179
180/* guest->host commands */
181#define VBVA_QUERY_CONF32 1
182#define VBVA_SET_CONF32 2
183#define VBVA_INFO_VIEW 3
184#define VBVA_INFO_HEAP 4
185#define VBVA_FLUSH 5
186#define VBVA_INFO_SCREEN 6
187#define VBVA_ENABLE 7
188#define VBVA_MOUSE_POINTER_SHAPE 8
189/* informs host about HGSMI caps. see vbva_caps below */
190#define VBVA_INFO_CAPS 12
191/* configures scanline, see VBVASCANLINECFG below */
192#define VBVA_SCANLINE_CFG 13
193/* requests scanline info, see VBVASCANLINEINFO below */
194#define VBVA_SCANLINE_INFO 14
195/* inform host about VBVA Command submission */
196#define VBVA_CMDVBVA_SUBMIT 16
197/* inform host about VBVA Command submission */
198#define VBVA_CMDVBVA_FLUSH 17
199/* G->H DMA command */
200#define VBVA_CMDVBVA_CTL 18
201/* Query most recent mode hints sent */
202#define VBVA_QUERY_MODE_HINTS 19
203/**
204 * Report the guest virtual desktop position and size for mapping host and
205 * guest pointer positions.
206 */
207#define VBVA_REPORT_INPUT_MAPPING 20
208/** Report the guest cursor position and query the host position. */
209#define VBVA_CURSOR_POSITION 21
210
211/* host->guest commands */
212#define VBVAHG_EVENT 1
213#define VBVAHG_DISPLAY_CUSTOM 2
214
215/* vbva_conf32::index */
216#define VBOX_VBVA_CONF32_MONITOR_COUNT 0
217#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1
218/**
219 * Returns VINF_SUCCESS if the host can report mode hints via VBVA.
220 * Set value to VERR_NOT_SUPPORTED before calling.
221 */
222#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2
223/**
224 * Returns VINF_SUCCESS if the host can report guest cursor enabled status via
225 * VBVA. Set value to VERR_NOT_SUPPORTED before calling.
226 */
227#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3
228/**
229 * Returns the currently available host cursor capabilities. Available if
230 * vbva_conf32::VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success.
231 * @see VMMDevReqMouseStatus::mouseFeatures.
232 */
233#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4
234/** Returns the supported flags in vbva_infoscreen::flags. */
235#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5
236/** Returns the max size of VBVA record. */
237#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6
238
239struct vbva_conf32 {
240 u32 index;
241 u32 value;
242} __packed;
243
244/** Reserved for historical reasons. */
245#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 BIT(0)
246/**
247 * Guest cursor capability: can the host show a hardware cursor at the host
248 * pointer location?
249 */
250#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE BIT(1)
251/** Reserved for historical reasons. */
252#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 BIT(2)
253/** Reserved for historical reasons. Must always be unset. */
254#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 BIT(3)
255/** Reserved for historical reasons. */
256#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 BIT(4)
257/** Reserved for historical reasons. */
258#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 BIT(5)
259
260struct vbva_infoview {
261 /* Index of the screen, assigned by the guest. */
262 u32 view_index;
263
264 /* The screen offset in VRAM, the framebuffer starts here. */
265 u32 view_offset;
266
267 /* The size of the VRAM memory that can be used for the view. */
268 u32 view_size;
269
270 /* The recommended maximum size of the VRAM memory for the screen. */
271 u32 max_screen_size;
272} __packed;
273
274struct vbva_flush {
275 u32 reserved;
276} __packed;
277
278/* vbva_infoscreen::flags */
279#define VBVA_SCREEN_F_NONE 0x0000
280#define VBVA_SCREEN_F_ACTIVE 0x0001
281/**
282 * The virtual monitor has been disabled by the guest and should be removed
283 * by the host and ignored for purposes of pointer position calculation.
284 */
285#define VBVA_SCREEN_F_DISABLED 0x0002
286/**
287 * The virtual monitor has been blanked by the guest and should be blacked
288 * out by the host using width, height, etc values from the vbva_infoscreen
289 * request.
290 */
291#define VBVA_SCREEN_F_BLANK 0x0004
292/**
293 * The virtual monitor has been blanked by the guest and should be blacked
 294 * out by the host using the previous mode values for width, height, etc.
295 */
296#define VBVA_SCREEN_F_BLANK2 0x0008
297
298struct vbva_infoscreen {
299 /* Which view contains the screen. */
300 u32 view_index;
301
302 /* Physical X origin relative to the primary screen. */
303 s32 origin_x;
304
305 /* Physical Y origin relative to the primary screen. */
306 s32 origin_y;
307
308 /* Offset of visible framebuffer relative to the framebuffer start. */
309 u32 start_offset;
310
311 /* The scan line size in bytes. */
312 u32 line_size;
313
314 /* Width of the screen. */
315 u32 width;
316
317 /* Height of the screen. */
318 u32 height;
319
320 /* Color depth. */
321 u16 bits_per_pixel;
322
323 /* VBVA_SCREEN_F_* */
324 u16 flags;
325} __packed;
326
327/* vbva_enable::flags */
328#define VBVA_F_NONE 0x00000000
329#define VBVA_F_ENABLE 0x00000001
330#define VBVA_F_DISABLE 0x00000002
331/* extended VBVA to be used with WDDM */
332#define VBVA_F_EXTENDED 0x00000004
333/* vbva offset is absolute VRAM offset */
334#define VBVA_F_ABSOFFSET 0x00000008
335
336struct vbva_enable {
337 u32 flags;
338 u32 offset;
339 s32 result;
340} __packed;
341
342struct vbva_enable_ex {
343 struct vbva_enable base;
344 u32 screen_id;
345} __packed;
346
347struct vbva_mouse_pointer_shape {
348 /* The host result. */
349 s32 result;
350
351 /* VBOX_MOUSE_POINTER_* bit flags. */
352 u32 flags;
353
354 /* X coordinate of the hot spot. */
355 u32 hot_X;
356
357 /* Y coordinate of the hot spot. */
358 u32 hot_y;
359
360 /* Width of the pointer in pixels. */
361 u32 width;
362
363 /* Height of the pointer in scanlines. */
364 u32 height;
365
366 /* Pointer data.
367 *
368 ****
369 * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color)
370 * mask.
371 *
372 * For pointers without alpha channel the XOR mask pixels are 32 bit
373 * values: (lsb)BGR0(msb). For pointers with alpha channel the XOR mask
374 * consists of (lsb)BGRA(msb) 32 bit values.
375 *
376 * Guest driver must create the AND mask for pointers with alpha chan.,
377 * so if host does not support alpha, the pointer could be displayed as
378 * a normal color pointer. The AND mask can be constructed from alpha
379 * values. For example alpha value >= 0xf0 means bit 0 in the AND mask.
380 *
381 * The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND
382 * mask, therefore, is and_len = (width + 7) / 8 * height. The padding
383 * bits at the end of any scanline are undefined.
384 *
385 * The XOR mask follows the AND mask on the next 4 bytes aligned offset:
386 * u8 *xor = and + (and_len + 3) & ~3
387 * Bytes in the gap between the AND and the XOR mask are undefined.
388 * XOR mask scanlines have no gap between them and size of XOR mask is:
389 * xor_len = width * 4 * height.
390 ****
391 *
392 * Preallocate 4 bytes for accessing actual data as p->data.
393 */
394 u8 data[4];
395} __packed;
396
397/**
398 * @name vbva_mouse_pointer_shape::flags
399 * @note The VBOX_MOUSE_POINTER_* flags are used in the guest video driver,
400 * values must be <= 0x8000 and must not be changed. (try make more sense
401 * of this, please).
402 * @{
403 */
404
405/** pointer is visible */
406#define VBOX_MOUSE_POINTER_VISIBLE 0x0001
407/** pointer has alpha channel */
408#define VBOX_MOUSE_POINTER_ALPHA 0x0002
409/** pointerData contains new pointer shape */
410#define VBOX_MOUSE_POINTER_SHAPE 0x0004
411
412/** @} */
413
414/*
 415 * The guest driver can handle asynchronous guest cmd completion by reading the
416 * command offset from io port.
417 */
418#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001
419/* the guest driver can handle video adapter IRQs */
420#define VBVACAPS_IRQ 0x00000002
421/** The guest can read video mode hints sent via VBVA. */
422#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004
423/** The guest can switch to a software cursor on demand. */
424#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008
425/** The guest does not depend on host handling the VBE registers. */
426#define VBVACAPS_USE_VBVA_ONLY 0x00000010
427
428struct vbva_caps {
429 s32 rc;
430 u32 caps;
431} __packed;
432
433/** Query the most recent mode hints received from the host. */
434struct vbva_query_mode_hints {
435 /** The maximum number of screens to return hints for. */
436 u16 hints_queried_count;
437 /** The size of the mode hint structures directly following this one. */
438 u16 hint_structure_guest_size;
439 /** Return code for the operation. Initialise to VERR_NOT_SUPPORTED. */
440 s32 rc;
441} __packed;
442
443/**
444 * Structure in which a mode hint is returned. The guest allocates an array
445 * of these immediately after the vbva_query_mode_hints structure.
446 * To accommodate future extensions, the vbva_query_mode_hints structure
447 * specifies the size of the vbva_modehint structures allocated by the guest,
448 * and the host only fills out structure elements which fit into that size. The
449 * host should fill any unused members (e.g. dx, dy) or structure space on the
450 * end with ~0. The whole structure can legally be set to ~0 to skip a screen.
451 */
452struct vbva_modehint {
453 u32 magic;
454 u32 cx;
455 u32 cy;
456 u32 bpp; /* Which has never been used... */
457 u32 display;
458 u32 dx; /**< X offset into the virtual frame-buffer. */
459 u32 dy; /**< Y offset into the virtual frame-buffer. */
460 u32 enabled; /* Not flags. Add new members for new flags. */
461} __packed;
462
463#define VBVAMODEHINT_MAGIC 0x0801add9u
464
465/**
466 * Report the rectangle relative to which absolute pointer events should be
467 * expressed. This information remains valid until the next VBVA resize event
468 * for any screen, at which time it is reset to the bounding rectangle of all
469 * virtual screens and must be re-set.
470 * @see VBVA_REPORT_INPUT_MAPPING.
471 */
472struct vbva_report_input_mapping {
473 s32 x; /**< Upper left X co-ordinate relative to the first screen. */
474 s32 y; /**< Upper left Y co-ordinate relative to the first screen. */
475 u32 cx; /**< Rectangle width. */
476 u32 cy; /**< Rectangle height. */
477} __packed;
478
479/**
480 * Report the guest cursor position and query the host one. The host may wish
481 * to use the guest information to re-position its own cursor (though this is
482 * currently unlikely).
483 * @see VBVA_CURSOR_POSITION
484 */
485struct vbva_cursor_position {
486 u32 report_position; /**< Are we reporting a position? */
487 u32 x; /**< Guest cursor X position */
488 u32 y; /**< Guest cursor Y position */
489} __packed;
490
491#endif
diff --git a/drivers/staging/vboxvideo/vboxvideo_guest.h b/drivers/staging/vboxvideo/vboxvideo_guest.h
new file mode 100644
index 000000000000..d09da841711a
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo_guest.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VBOXVIDEO_GUEST_H__
24#define __VBOXVIDEO_GUEST_H__
25
26#include <linux/genalloc.h>
27#include "vboxvideo.h"
28
/**
 * Structure grouping the context needed for sending graphics acceleration
 * information to the host via VBVA. Each screen has its own VBVA buffer.
 */
struct vbva_buf_ctx {
	/** Offset of the buffer in the VRAM section for the screen */
	u32 buffer_offset;
	/** Length of the buffer in bytes (header plus data area) */
	u32 buffer_length;
	/** Set if we wrote to the buffer faster than the host could read it */
	bool buffer_overflow;
	/** VBVA record that we are currently preparing for the host, or NULL */
	struct vbva_record *record;
	/**
	 * Pointer to the VBVA buffer mapped into the current address space.
	 * Will be NULL if VBVA is not enabled.
	 */
	struct vbva_buffer *vbva;
};
48
49/**
50 * @name Base HGSMI APIs
51 * @{
52 */
53int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location);
54int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps);
55int hgsmi_test_query_conf(struct gen_pool *ctx);
56int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret);
57int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
58 u32 hot_x, u32 hot_y, u32 width, u32 height,
59 u8 *pixels, u32 len);
60int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
61 u32 x, u32 y, u32 *x_host, u32 *y_host);
62/** @} */
63
64/**
65 * @name VBVA APIs
66 * @{
67 */
68bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
69 struct vbva_buffer *vbva, s32 screen);
70void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
71 s32 screen);
72bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
73 struct gen_pool *ctx);
74void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx);
75bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
76 const void *p, u32 len);
77void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
78 u32 buffer_offset, u32 buffer_length);
79/** @} */
80
81/**
82 * @name Modesetting APIs
83 * @{
84 */
85void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
86 s32 origin_x, s32 origin_y, u32 start_offset,
87 u32 pitch, u32 width, u32 height,
88 u16 bpp, u16 flags);
89int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
90 u32 width, u32 height);
91int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
92 struct vbva_modehint *hints);
93/** @} */
94
95#endif
diff --git a/drivers/staging/vboxvideo/vboxvideo_vbe.h b/drivers/staging/vboxvideo/vboxvideo_vbe.h
new file mode 100644
index 000000000000..f842f4d9c80a
--- /dev/null
+++ b/drivers/staging/vboxvideo/vboxvideo_vbe.h
@@ -0,0 +1,84 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __VBOXVIDEO_VBE_H__
24#define __VBOXVIDEO_VBE_H__
25
26/* GUEST <-> HOST Communication API */
27
/**
 * @todo FIXME: Either dynamically ask host for this or put somewhere high in
 * physical memory like 0xE0000000.
 */
32
33#define VBE_DISPI_BANK_ADDRESS 0xA0000
34#define VBE_DISPI_BANK_SIZE_KB 64
35
36#define VBE_DISPI_MAX_XRES 16384
37#define VBE_DISPI_MAX_YRES 16384
38#define VBE_DISPI_MAX_BPP 32
39
40#define VBE_DISPI_IOPORT_INDEX 0x01CE
41#define VBE_DISPI_IOPORT_DATA 0x01CF
42
43#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8
44#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9
45
46#define VBE_DISPI_INDEX_ID 0x0
47#define VBE_DISPI_INDEX_XRES 0x1
48#define VBE_DISPI_INDEX_YRES 0x2
49#define VBE_DISPI_INDEX_BPP 0x3
50#define VBE_DISPI_INDEX_ENABLE 0x4
51#define VBE_DISPI_INDEX_BANK 0x5
52#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
53#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
54#define VBE_DISPI_INDEX_X_OFFSET 0x8
55#define VBE_DISPI_INDEX_Y_OFFSET 0x9
56#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa
57#define VBE_DISPI_INDEX_FB_BASE_HI 0xb
58
59#define VBE_DISPI_ID0 0xB0C0
60#define VBE_DISPI_ID1 0xB0C1
61#define VBE_DISPI_ID2 0xB0C2
62#define VBE_DISPI_ID3 0xB0C3
63#define VBE_DISPI_ID4 0xB0C4
64
65#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00
66/* The VBOX interface id. Indicates support for VBVA shared memory interface. */
67#define VBE_DISPI_ID_HGSMI 0xBE01
68#define VBE_DISPI_ID_ANYX 0xBE02
69
70#define VBE_DISPI_DISABLED 0x00
71#define VBE_DISPI_ENABLED 0x01
72#define VBE_DISPI_GETCAPS 0x02
73#define VBE_DISPI_8BIT_DAC 0x20
74/**
75 * @note this definition is a BOCHS legacy, used only in the video BIOS
76 * code and ignored by the emulated hardware.
77 */
78#define VBE_DISPI_LFB_ENABLED 0x40
79#define VBE_DISPI_NOCLEARMEM 0x80
80
81#define VGA_PORT_HGSMI_HOST 0x3b0
82#define VGA_PORT_HGSMI_GUEST 0x3d0
83
84#endif
diff --git a/drivers/staging/vboxvideo/vbva_base.c b/drivers/staging/vboxvideo/vbva_base.c
new file mode 100644
index 000000000000..c10c782f94e1
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbva_base.c
@@ -0,0 +1,233 @@
1/*
2 * Copyright (C) 2006-2017 Oracle Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "vbox_drv.h"
24#include "vbox_err.h"
25#include "vboxvideo_guest.h"
26#include "hgsmi_channels.h"
27
28/*
29 * There is a hardware ring buffer in the graphics device video RAM, formerly
30 * in the VBox VMMDev PCI memory space.
 * All graphics commands go there serialized by vbva_buffer_begin_update
 * and vbva_buffer_end_update.
33 *
34 * free_offset is writing position. data_offset is reading position.
35 * free_offset == data_offset means buffer is empty.
36 * There must be always gap between data_offset and free_offset when data
37 * are in the buffer.
38 * Guest only changes free_offset, host changes data_offset.
39 */
40
41static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
42{
43 s32 diff = vbva->data_offset - vbva->free_offset;
44
45 return diff > 0 ? diff : vbva->data_len + diff;
46}
47
48static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
49 const void *p, u32 len, u32 offset)
50{
51 struct vbva_buffer *vbva = vbva_ctx->vbva;
52 u32 bytes_till_boundary = vbva->data_len - offset;
53 u8 *dst = &vbva->data[offset];
54 s32 diff = len - bytes_till_boundary;
55
56 if (diff <= 0) {
57 /* Chunk will not cross buffer boundary. */
58 memcpy(dst, p, len);
59 } else {
60 /* Chunk crosses buffer boundary. */
61 memcpy(dst, p, bytes_till_boundary);
62 memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
63 }
64}
65
66static void vbva_buffer_flush(struct gen_pool *ctx)
67{
68 struct vbva_flush *p;
69
70 p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
71 if (!p)
72 return;
73
74 p->reserved = 0;
75
76 hgsmi_buffer_submit(ctx, p);
77 hgsmi_buffer_free(ctx, p);
78}
79
80bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
81 const void *p, u32 len)
82{
83 struct vbva_record *record;
84 struct vbva_buffer *vbva;
85 u32 available;
86
87 vbva = vbva_ctx->vbva;
88 record = vbva_ctx->record;
89
90 if (!vbva || vbva_ctx->buffer_overflow ||
91 !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
92 return false;
93
94 available = vbva_buffer_available(vbva);
95
96 while (len > 0) {
97 u32 chunk = len;
98
99 if (chunk >= available) {
100 vbva_buffer_flush(ctx);
101 available = vbva_buffer_available(vbva);
102 }
103
104 if (chunk >= available) {
105 if (WARN_ON(available <= vbva->partial_write_tresh)) {
106 vbva_ctx->buffer_overflow = true;
107 return false;
108 }
109 chunk = available - vbva->partial_write_tresh;
110 }
111
112 vbva_buffer_place_data_at(vbva_ctx, p, chunk,
113 vbva->free_offset);
114
115 vbva->free_offset = (vbva->free_offset + chunk) %
116 vbva->data_len;
117 record->len_and_flags += chunk;
118 available -= chunk;
119 len -= chunk;
120 p += chunk;
121 }
122
123 return true;
124}
125
/*
 * Tell the host to enable or disable VBVA for one screen.
 *
 * A non-negative @screen selects the extended multi-screen form of the
 * request; a negative value falls back to the basic single-screen form.
 * Returns true on success.  For disable requests the host's result code is
 * deliberately ignored — disabling is always treated as successful.
 */
static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
			     struct gen_pool *ctx, s32 screen, bool enable)
{
	struct vbva_enable_ex *p;
	bool ret;

	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
	if (!p)
		return false;

	p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
	p->base.offset = vbva_ctx->buffer_offset;
	/* Pre-set a failure code; presumably overwritten by the host on submit. */
	p->base.result = VERR_NOT_SUPPORTED;
	if (screen >= 0) {
		p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
		p->screen_id = screen;
	}

	hgsmi_buffer_submit(ctx, p);

	if (enable)
		ret = RT_SUCCESS(p->base.result);
	else
		ret = true;

	hgsmi_buffer_free(ctx, p);

	return ret;
}
155
156bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
157 struct vbva_buffer *vbva, s32 screen)
158{
159 bool ret = false;
160
161 memset(vbva, 0, sizeof(*vbva));
162 vbva->partial_write_tresh = 256;
163 vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
164 vbva_ctx->vbva = vbva;
165
166 ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
167 if (!ret)
168 vbva_disable(vbva_ctx, ctx, screen);
169
170 return ret;
171}
172
/**
 * Disable VBVA on @screen and reset the local context so that subsequent
 * begin-update/write calls fail cleanly until vbva_enable is called again.
 * Local state is cleared before informing the host; the host call is
 * best-effort (its result is ignored, see vbva_inform_host).
 */
void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
		  s32 screen)
{
	vbva_ctx->buffer_overflow = false;
	vbva_ctx->record = NULL;
	vbva_ctx->vbva = NULL;

	vbva_inform_host(vbva_ctx, ctx, screen, false);
}
182
/**
 * Open a new partial record in the VBVA records queue; subsequent
 * vbva_write calls accumulate into it until vbva_buffer_end_update.
 * Returns false if VBVA is off (host has not set VBVA_F_MODE_ENABLED) or
 * the records queue is full even after asking the host to drain it.
 */
bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
			      struct gen_pool *ctx)
{
	struct vbva_record *record;
	u32 next;

	if (!vbva_ctx->vbva ||
	    !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
		return false;

	/* An update must not already be in progress. */
	WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);

	next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;

	/* Flush if all slots in the records queue are used */
	if (next == vbva_ctx->vbva->record_first_index)
		vbva_buffer_flush(ctx);

	/* If even after flush there is no place then fail the request */
	if (next == vbva_ctx->vbva->record_first_index)
		return false;

	/* Claim the free slot and mark it partial so the host waits for us. */
	record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
	record->len_and_flags = VBVA_F_RECORD_PARTIAL;
	vbva_ctx->vbva->record_free_index = next;
	/* Remember which record we are using. */
	vbva_ctx->record = record;

	return true;
}
213
214void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
215{
216 struct vbva_record *record = vbva_ctx->record;
217
218 WARN_ON(!vbva_ctx->vbva || !record ||
219 !(record->len_and_flags & VBVA_F_RECORD_PARTIAL));
220
221 /* Mark the record completed. */
222 record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
223
224 vbva_ctx->buffer_overflow = false;
225 vbva_ctx->record = NULL;
226}
227
/**
 * Record where in VRAM the screen's VBVA buffer lives (@buffer_offset) and
 * how large it is (@buffer_length).  Pure bookkeeping: the values are used
 * later by vbva_enable and vbva_inform_host.
 */
void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
			       u32 buffer_offset, u32 buffer_length)
{
	vbva_ctx->buffer_offset = buffer_offset;
	vbva_ctx->buffer_length = buffer_length;
}
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 030bec855d86..314ffac50bb8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3391,7 +3391,6 @@ static int vchiq_probe(struct platform_device *pdev)
3391 struct device_node *fw_node; 3391 struct device_node *fw_node;
3392 struct rpi_firmware *fw; 3392 struct rpi_firmware *fw;
3393 int err; 3393 int err;
3394 void *ptr_err;
3395 3394
3396 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0); 3395 fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
3397 if (!fw_node) { 3396 if (!fw_node) {
@@ -3427,14 +3426,14 @@ static int vchiq_probe(struct platform_device *pdev)
3427 3426
3428 /* create sysfs entries */ 3427 /* create sysfs entries */
3429 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME); 3428 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
3430 ptr_err = vchiq_class; 3429 err = PTR_ERR(vchiq_class);
3431 if (IS_ERR(ptr_err)) 3430 if (IS_ERR(vchiq_class))
3432 goto failed_class_create; 3431 goto failed_class_create;
3433 3432
3434 vchiq_dev = device_create(vchiq_class, NULL, 3433 vchiq_dev = device_create(vchiq_class, NULL,
3435 vchiq_devid, NULL, "vchiq"); 3434 vchiq_devid, NULL, "vchiq");
3436 ptr_err = vchiq_dev; 3435 err = PTR_ERR(vchiq_dev);
3437 if (IS_ERR(ptr_err)) 3436 if (IS_ERR(vchiq_dev))
3438 goto failed_device_create; 3437 goto failed_device_create;
3439 3438
3440 /* create debugfs entries */ 3439 /* create debugfs entries */
@@ -3455,7 +3454,6 @@ failed_device_create:
3455 class_destroy(vchiq_class); 3454 class_destroy(vchiq_class);
3456failed_class_create: 3455failed_class_create:
3457 cdev_del(&vchiq_cdev); 3456 cdev_del(&vchiq_cdev);
3458 err = PTR_ERR(ptr_err);
3459failed_cdev_add: 3457failed_cdev_add:
3460 unregister_chrdev_region(vchiq_devid, 1); 3458 unregister_chrdev_region(vchiq_devid, 1);
3461failed_platform_init: 3459failed_platform_init: