aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--MAINTAINERS1
-rw-r--r--arch/x86/Kbuild2
-rw-r--r--arch/x86/Kconfig6
-rw-r--r--arch/x86/kernel/head_64.S2
-rw-r--r--arch/x86/platform/pvh/Makefile5
-rw-r--r--arch/x86/platform/pvh/enlighten.c137
-rw-r--r--arch/x86/platform/pvh/head.S (renamed from arch/x86/xen/xen-pvh.S)0
-rw-r--r--arch/x86/xen/Kconfig3
-rw-r--r--arch/x86/xen/Makefile2
-rw-r--r--arch/x86/xen/enlighten_pvh.c92
-rw-r--r--arch/x86/xen/xen-asm_64.S2
-rw-r--r--drivers/gpu/drm/xen/Kconfig1
-rw-r--r--drivers/gpu/drm/xen/Makefile1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c65
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_shbuf.c414
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_shbuf.h64
-rw-r--r--drivers/xen/Kconfig3
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/xen-front-pgdir-shbuf.c553
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c3
-rw-r--r--include/xen/interface/hvm/start_info.h63
-rw-r--r--include/xen/xen-front-pgdir-shbuf.h89
-rw-r--r--include/xen/xen.h3
-rw-r--r--sound/xen/Kconfig1
-rw-r--r--sound/xen/Makefile1
-rw-r--r--sound/xen/xen_snd_front.c7
-rw-r--r--sound/xen/xen_snd_front.h4
-rw-r--r--sound/xen/xen_snd_front_alsa.c102
-rw-r--r--sound/xen/xen_snd_front_shbuf.c194
-rw-r--r--sound/xen/xen_snd_front_shbuf.h36
31 files changed, 996 insertions, 862 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index a8666d1a3a51..80b377dda900 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16590,6 +16590,7 @@ L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
16590T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git 16590T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
16591S: Supported 16591S: Supported
16592F: arch/x86/xen/ 16592F: arch/x86/xen/
16593F: arch/x86/platform/pvh/
16593F: drivers/*/xen-*front.c 16594F: drivers/*/xen-*front.c
16594F: drivers/xen/ 16595F: drivers/xen/
16595F: arch/x86/include/asm/xen/ 16596F: arch/x86/include/asm/xen/
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 0038a2d10a7a..c625f57472f7 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -7,6 +7,8 @@ obj-$(CONFIG_KVM) += kvm/
7# Xen paravirtualization support 7# Xen paravirtualization support
8obj-$(CONFIG_XEN) += xen/ 8obj-$(CONFIG_XEN) += xen/
9 9
10obj-$(CONFIG_PVH) += platform/pvh/
11
10# Hyper-V paravirtualization support 12# Hyper-V paravirtualization support
11obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/ 13obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/
12 14
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8689e794a43c..c2a22a74abee 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -796,6 +796,12 @@ config KVM_GUEST
796 underlying device model, the host provides the guest with 796 underlying device model, the host provides the guest with
797 timing infrastructure such as time of day, and system time 797 timing infrastructure such as time of day, and system time
798 798
799config PVH
800 bool "Support for running PVH guests"
801 ---help---
802 This option enables the PVH entry point for guest virtual machines
803 as specified in the x86/HVM direct boot ABI.
804
799config KVM_DEBUG_FS 805config KVM_DEBUG_FS
800 bool "Enable debug information for KVM Guests in debugfs" 806 bool "Enable debug information for KVM Guests in debugfs"
801 depends on KVM_GUEST && DEBUG_FS 807 depends on KVM_GUEST && DEBUG_FS
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 747c758f67b7..d1dbe8e4eb82 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -386,7 +386,7 @@ NEXT_PAGE(early_dynamic_pgts)
386 386
387 .data 387 .data
388 388
389#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) 389#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
390NEXT_PGD_PAGE(init_top_pgt) 390NEXT_PGD_PAGE(init_top_pgt)
391 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC 391 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
392 .org init_top_pgt + L4_PAGE_OFFSET*8, 0 392 .org init_top_pgt + L4_PAGE_OFFSET*8, 0
diff --git a/arch/x86/platform/pvh/Makefile b/arch/x86/platform/pvh/Makefile
new file mode 100644
index 000000000000..5dec5067c9fb
--- /dev/null
+++ b/arch/x86/platform/pvh/Makefile
@@ -0,0 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0
2OBJECT_FILES_NON_STANDARD_head.o := y
3
4obj-$(CONFIG_PVH) += enlighten.o
5obj-$(CONFIG_PVH) += head.o
diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c
new file mode 100644
index 000000000000..62f5c7045944
--- /dev/null
+++ b/arch/x86/platform/pvh/enlighten.c
@@ -0,0 +1,137 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/acpi.h>
3
4#include <xen/hvc-console.h>
5
6#include <asm/io_apic.h>
7#include <asm/hypervisor.h>
8#include <asm/e820/api.h>
9#include <asm/x86_init.h>
10
11#include <asm/xen/interface.h>
12
13#include <xen/xen.h>
14#include <xen/interface/hvm/start_info.h>
15
16/*
17 * PVH variables.
18 *
19 * pvh_bootparams and pvh_start_info need to live in the data segment since
20 * they are used after startup_{32|64}, which clear .bss, are invoked.
21 */
22struct boot_params pvh_bootparams __attribute__((section(".data")));
23struct hvm_start_info pvh_start_info __attribute__((section(".data")));
24
25unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
26
27static u64 pvh_get_root_pointer(void)
28{
29 return pvh_start_info.rsdp_paddr;
30}
31
32/*
33 * Xen guests are able to obtain the memory map from the hypervisor via the
34 * HYPERVISOR_memory_op hypercall.
35 * If we are trying to boot a Xen PVH guest, it is expected that the kernel
36 * will have been configured to provide an override for this routine to do
37 * just that.
38 */
39void __init __weak mem_map_via_hcall(struct boot_params *ptr __maybe_unused)
40{
41 xen_raw_printk("Error: Could not find memory map\n");
42 BUG();
43}
44
45static void __init init_pvh_bootparams(bool xen_guest)
46{
47 memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
48
49 if ((pvh_start_info.version > 0) && (pvh_start_info.memmap_entries)) {
50 struct hvm_memmap_table_entry *ep;
51 int i;
52
53 ep = __va(pvh_start_info.memmap_paddr);
54 pvh_bootparams.e820_entries = pvh_start_info.memmap_entries;
55
56 for (i = 0; i < pvh_bootparams.e820_entries ; i++, ep++) {
57 pvh_bootparams.e820_table[i].addr = ep->addr;
58 pvh_bootparams.e820_table[i].size = ep->size;
59 pvh_bootparams.e820_table[i].type = ep->type;
60 }
61 } else if (xen_guest) {
62 mem_map_via_hcall(&pvh_bootparams);
63 } else {
64 /* Non-xen guests are not supported by version 0 */
65 BUG();
66 }
67
68 if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
69 pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
70 ISA_START_ADDRESS;
71 pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
72 ISA_END_ADDRESS - ISA_START_ADDRESS;
73 pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
74 E820_TYPE_RESERVED;
75 pvh_bootparams.e820_entries++;
76 } else
77 xen_raw_printk("Warning: Can fit ISA range into e820\n");
78
79 pvh_bootparams.hdr.cmd_line_ptr =
80 pvh_start_info.cmdline_paddr;
81
82 /* The first module is always ramdisk. */
83 if (pvh_start_info.nr_modules) {
84 struct hvm_modlist_entry *modaddr =
85 __va(pvh_start_info.modlist_paddr);
86 pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
87 pvh_bootparams.hdr.ramdisk_size = modaddr->size;
88 }
89
90 /*
91 * See Documentation/x86/boot.txt.
92 *
93 * Version 2.12 supports Xen entry point but we will use default x86/PC
94 * environment (i.e. hardware_subarch 0).
95 */
96 pvh_bootparams.hdr.version = (2 << 8) | 12;
97 pvh_bootparams.hdr.type_of_loader = ((xen_guest ? 0x9 : 0xb) << 4) | 0;
98
99 x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
100}
101
102/*
103 * If we are trying to boot a Xen PVH guest, it is expected that the kernel
104 * will have been configured to provide the required override for this routine.
105 */
106void __init __weak xen_pvh_init(void)
107{
108 xen_raw_printk("Error: Missing xen PVH initialization\n");
109 BUG();
110}
111
112static void hypervisor_specific_init(bool xen_guest)
113{
114 if (xen_guest)
115 xen_pvh_init();
116}
117
118/*
119 * This routine (and those that it might call) should not use
120 * anything that lives in .bss since that segment will be cleared later.
121 */
122void __init xen_prepare_pvh(void)
123{
124
125 u32 msr = xen_cpuid_base();
126 bool xen_guest = !!msr;
127
128 if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
129 xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
130 pvh_start_info.magic);
131 BUG();
132 }
133
134 hypervisor_specific_init(xen_guest);
135
136 init_pvh_bootparams(xen_guest);
137}
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/platform/pvh/head.S
index 1f8825bbaffb..1f8825bbaffb 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/platform/pvh/head.S
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 1ef391aa184d..e07abefd3d26 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -74,6 +74,7 @@ config XEN_DEBUG_FS
74 Enabling this option may incur a significant performance overhead. 74 Enabling this option may incur a significant performance overhead.
75 75
76config XEN_PVH 76config XEN_PVH
77 bool "Support for running as a PVH guest" 77 bool "Support for running as a Xen PVH guest"
78 depends on XEN && XEN_PVHVM && ACPI 78 depends on XEN && XEN_PVHVM && ACPI
79 select PVH
79 def_bool n 80 def_bool n
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index dd2550d33b38..084de77a109e 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,6 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y 2OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
3OBJECT_FILES_NON_STANDARD_xen-pvh.o := y
4 3
5ifdef CONFIG_FUNCTION_TRACER 4ifdef CONFIG_FUNCTION_TRACER
6# Do not profile debug and lowlevel utilities 5# Do not profile debug and lowlevel utilities
@@ -38,7 +37,6 @@ obj-$(CONFIG_XEN_PV) += xen-asm.o
38obj-$(CONFIG_XEN_PV) += xen-asm_$(BITS).o 37obj-$(CONFIG_XEN_PV) += xen-asm_$(BITS).o
39 38
40obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o 39obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o
41obj-$(CONFIG_XEN_PVH) += xen-pvh.o
42 40
43obj-$(CONFIG_EVENT_TRACING) += trace.o 41obj-$(CONFIG_EVENT_TRACING) += trace.o
44 42
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 02e3ab7ff242..35b7599d2d0b 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -6,103 +6,45 @@
6#include <asm/io_apic.h> 6#include <asm/io_apic.h>
7#include <asm/hypervisor.h> 7#include <asm/hypervisor.h>
8#include <asm/e820/api.h> 8#include <asm/e820/api.h>
9#include <asm/x86_init.h>
10 9
10#include <xen/xen.h>
11#include <asm/xen/interface.h> 11#include <asm/xen/interface.h>
12#include <asm/xen/hypercall.h> 12#include <asm/xen/hypercall.h>
13 13
14#include <xen/xen.h>
15#include <xen/interface/memory.h> 14#include <xen/interface/memory.h>
16#include <xen/interface/hvm/start_info.h>
17 15
18/* 16/*
19 * PVH variables. 17 * PVH variables.
20 * 18 *
21 * xen_pvh pvh_bootparams and pvh_start_info need to live in data segment 19 * The variable xen_pvh needs to live in the data segment since it is used
22 * since they are used after startup_{32|64}, which clear .bss, are invoked. 20 * after startup_{32|64} is invoked, which will clear the .bss segment.
23 */ 21 */
24bool xen_pvh __attribute__((section(".data"))) = 0; 22bool xen_pvh __attribute__((section(".data"))) = 0;
25struct boot_params pvh_bootparams __attribute__((section(".data")));
26struct hvm_start_info pvh_start_info __attribute__((section(".data")));
27
28unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
29 23
30static u64 pvh_get_root_pointer(void) 24void __init xen_pvh_init(void)
31{ 25{
32 return pvh_start_info.rsdp_paddr; 26 u32 msr;
27 u64 pfn;
28
29 xen_pvh = 1;
30 xen_start_flags = pvh_start_info.flags;
31
32 msr = cpuid_ebx(xen_cpuid_base() + 2);
33 pfn = __pa(hypercall_page);
34 wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
33} 35}
34 36
35static void __init init_pvh_bootparams(void) 37void __init mem_map_via_hcall(struct boot_params *boot_params_p)
36{ 38{
37 struct xen_memory_map memmap; 39 struct xen_memory_map memmap;
38 int rc; 40 int rc;
39 41
40 memset(&pvh_bootparams, 0, sizeof(pvh_bootparams)); 42 memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
41 43 set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
42 memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table);
43 set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table);
44 rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap); 44 rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
45 if (rc) { 45 if (rc) {
46 xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc); 46 xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
47 BUG(); 47 BUG();
48 } 48 }
49 pvh_bootparams.e820_entries = memmap.nr_entries; 49 boot_params_p->e820_entries = memmap.nr_entries;
50
51 if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
52 pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
53 ISA_START_ADDRESS;
54 pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
55 ISA_END_ADDRESS - ISA_START_ADDRESS;
56 pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
57 E820_TYPE_RESERVED;
58 pvh_bootparams.e820_entries++;
59 } else
60 xen_raw_printk("Warning: Can fit ISA range into e820\n");
61
62 pvh_bootparams.hdr.cmd_line_ptr =
63 pvh_start_info.cmdline_paddr;
64
65 /* The first module is always ramdisk. */
66 if (pvh_start_info.nr_modules) {
67 struct hvm_modlist_entry *modaddr =
68 __va(pvh_start_info.modlist_paddr);
69 pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
70 pvh_bootparams.hdr.ramdisk_size = modaddr->size;
71 }
72
73 /*
74 * See Documentation/x86/boot.txt.
75 *
76 * Version 2.12 supports Xen entry point but we will use default x86/PC
77 * environment (i.e. hardware_subarch 0).
78 */
79 pvh_bootparams.hdr.version = (2 << 8) | 12;
80 pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
81
82 x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
83}
84
85/*
86 * This routine (and those that it might call) should not use
87 * anything that lives in .bss since that segment will be cleared later.
88 */
89void __init xen_prepare_pvh(void)
90{
91 u32 msr;
92 u64 pfn;
93
94 if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
95 xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
96 pvh_start_info.magic);
97 BUG();
98 }
99
100 xen_pvh = 1;
101 xen_start_flags = pvh_start_info.flags;
102
103 msr = cpuid_ebx(xen_cpuid_base() + 2);
104 pfn = __pa(hypercall_page);
105 wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
106
107 init_pvh_bootparams();
108} 50}
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index bb1c2da0381d..1e9ef0ba30a5 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -12,6 +12,7 @@
12#include <asm/segment.h> 12#include <asm/segment.h>
13#include <asm/asm-offsets.h> 13#include <asm/asm-offsets.h>
14#include <asm/thread_info.h> 14#include <asm/thread_info.h>
15#include <asm/asm.h>
15 16
16#include <xen/interface/xen.h> 17#include <xen/interface/xen.h>
17 18
@@ -24,6 +25,7 @@ ENTRY(xen_\name)
24 pop %r11 25 pop %r11
25 jmp \name 26 jmp \name
26END(xen_\name) 27END(xen_\name)
28_ASM_NOKPROBE(xen_\name)
27.endm 29.endm
28 30
29xen_pv_trap divide_error 31xen_pv_trap divide_error
diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig
index 4cca160782ab..f969d486855d 100644
--- a/drivers/gpu/drm/xen/Kconfig
+++ b/drivers/gpu/drm/xen/Kconfig
@@ -12,6 +12,7 @@ config DRM_XEN_FRONTEND
12 select DRM_KMS_HELPER 12 select DRM_KMS_HELPER
13 select VIDEOMODE_HELPERS 13 select VIDEOMODE_HELPERS
14 select XEN_XENBUS_FRONTEND 14 select XEN_XENBUS_FRONTEND
15 select XEN_FRONT_PGDIR_SHBUF
15 help 16 help
16 Choose this option if you want to enable a para-virtualized 17 Choose this option if you want to enable a para-virtualized
17 frontend DRM/KMS driver for Xen guest OSes. 18 frontend DRM/KMS driver for Xen guest OSes.
diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile
index 712afff5ffc3..825905f67faa 100644
--- a/drivers/gpu/drm/xen/Makefile
+++ b/drivers/gpu/drm/xen/Makefile
@@ -4,7 +4,6 @@ drm_xen_front-objs := xen_drm_front.o \
4 xen_drm_front_kms.o \ 4 xen_drm_front_kms.o \
5 xen_drm_front_conn.o \ 5 xen_drm_front_conn.o \
6 xen_drm_front_evtchnl.o \ 6 xen_drm_front_evtchnl.o \
7 xen_drm_front_shbuf.o \
8 xen_drm_front_cfg.o \ 7 xen_drm_front_cfg.o \
9 xen_drm_front_gem.o 8 xen_drm_front_gem.o
10 9
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 6b6d5ab82ec3..4d3d36fc3a5d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -19,6 +19,7 @@
19#include <xen/xen.h> 19#include <xen/xen.h>
20#include <xen/xenbus.h> 20#include <xen/xenbus.h>
21 21
22#include <xen/xen-front-pgdir-shbuf.h>
22#include <xen/interface/io/displif.h> 23#include <xen/interface/io/displif.h>
23 24
24#include "xen_drm_front.h" 25#include "xen_drm_front.h"
@@ -26,28 +27,20 @@
26#include "xen_drm_front_evtchnl.h" 27#include "xen_drm_front_evtchnl.h"
27#include "xen_drm_front_gem.h" 28#include "xen_drm_front_gem.h"
28#include "xen_drm_front_kms.h" 29#include "xen_drm_front_kms.h"
29#include "xen_drm_front_shbuf.h"
30 30
31struct xen_drm_front_dbuf { 31struct xen_drm_front_dbuf {
32 struct list_head list; 32 struct list_head list;
33 u64 dbuf_cookie; 33 u64 dbuf_cookie;
34 u64 fb_cookie; 34 u64 fb_cookie;
35 struct xen_drm_front_shbuf *shbuf; 35
36 struct xen_front_pgdir_shbuf shbuf;
36}; 37};
37 38
38static int dbuf_add_to_list(struct xen_drm_front_info *front_info, 39static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
39 struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie) 40 struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
40{ 41{
41 struct xen_drm_front_dbuf *dbuf;
42
43 dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
44 if (!dbuf)
45 return -ENOMEM;
46
47 dbuf->dbuf_cookie = dbuf_cookie; 42 dbuf->dbuf_cookie = dbuf_cookie;
48 dbuf->shbuf = shbuf;
49 list_add(&dbuf->list, &front_info->dbuf_list); 43 list_add(&dbuf->list, &front_info->dbuf_list);
50 return 0;
51} 44}
52 45
53static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list, 46static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
@@ -62,15 +55,6 @@ static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
62 return NULL; 55 return NULL;
63} 56}
64 57
65static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
66{
67 struct xen_drm_front_dbuf *buf, *q;
68
69 list_for_each_entry_safe(buf, q, dbuf_list, list)
70 if (buf->fb_cookie == fb_cookie)
71 xen_drm_front_shbuf_flush(buf->shbuf);
72}
73
74static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie) 58static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
75{ 59{
76 struct xen_drm_front_dbuf *buf, *q; 60 struct xen_drm_front_dbuf *buf, *q;
@@ -78,8 +62,8 @@ static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
78 list_for_each_entry_safe(buf, q, dbuf_list, list) 62 list_for_each_entry_safe(buf, q, dbuf_list, list)
79 if (buf->dbuf_cookie == dbuf_cookie) { 63 if (buf->dbuf_cookie == dbuf_cookie) {
80 list_del(&buf->list); 64 list_del(&buf->list);
81 xen_drm_front_shbuf_unmap(buf->shbuf); 65 xen_front_pgdir_shbuf_unmap(&buf->shbuf);
82 xen_drm_front_shbuf_free(buf->shbuf); 66 xen_front_pgdir_shbuf_free(&buf->shbuf);
83 kfree(buf); 67 kfree(buf);
84 break; 68 break;
85 } 69 }
@@ -91,8 +75,8 @@ static void dbuf_free_all(struct list_head *dbuf_list)
91 75
92 list_for_each_entry_safe(buf, q, dbuf_list, list) { 76 list_for_each_entry_safe(buf, q, dbuf_list, list) {
93 list_del(&buf->list); 77 list_del(&buf->list);
94 xen_drm_front_shbuf_unmap(buf->shbuf); 78 xen_front_pgdir_shbuf_unmap(&buf->shbuf);
95 xen_drm_front_shbuf_free(buf->shbuf); 79 xen_front_pgdir_shbuf_free(&buf->shbuf);
96 kfree(buf); 80 kfree(buf);
97 } 81 }
98} 82}
@@ -171,9 +155,9 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
171 u32 bpp, u64 size, struct page **pages) 155 u32 bpp, u64 size, struct page **pages)
172{ 156{
173 struct xen_drm_front_evtchnl *evtchnl; 157 struct xen_drm_front_evtchnl *evtchnl;
174 struct xen_drm_front_shbuf *shbuf; 158 struct xen_drm_front_dbuf *dbuf;
175 struct xendispl_req *req; 159 struct xendispl_req *req;
176 struct xen_drm_front_shbuf_cfg buf_cfg; 160 struct xen_front_pgdir_shbuf_cfg buf_cfg;
177 unsigned long flags; 161 unsigned long flags;
178 int ret; 162 int ret;
179 163
@@ -181,28 +165,29 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
181 if (unlikely(!evtchnl)) 165 if (unlikely(!evtchnl))
182 return -EIO; 166 return -EIO;
183 167
168 dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
169 if (!dbuf)
170 return -ENOMEM;
171
172 dbuf_add_to_list(front_info, dbuf, dbuf_cookie);
173
184 memset(&buf_cfg, 0, sizeof(buf_cfg)); 174 memset(&buf_cfg, 0, sizeof(buf_cfg));
185 buf_cfg.xb_dev = front_info->xb_dev; 175 buf_cfg.xb_dev = front_info->xb_dev;
176 buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
186 buf_cfg.pages = pages; 177 buf_cfg.pages = pages;
187 buf_cfg.size = size; 178 buf_cfg.pgdir = &dbuf->shbuf;
188 buf_cfg.be_alloc = front_info->cfg.be_alloc; 179 buf_cfg.be_alloc = front_info->cfg.be_alloc;
189 180
190 shbuf = xen_drm_front_shbuf_alloc(&buf_cfg); 181 ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
191 if (IS_ERR(shbuf)) 182 if (ret < 0)
192 return PTR_ERR(shbuf); 183 goto fail_shbuf_alloc;
193
194 ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
195 if (ret < 0) {
196 xen_drm_front_shbuf_free(shbuf);
197 return ret;
198 }
199 184
200 mutex_lock(&evtchnl->u.req.req_io_lock); 185 mutex_lock(&evtchnl->u.req.req_io_lock);
201 186
202 spin_lock_irqsave(&front_info->io_lock, flags); 187 spin_lock_irqsave(&front_info->io_lock, flags);
203 req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE); 188 req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
204 req->op.dbuf_create.gref_directory = 189 req->op.dbuf_create.gref_directory =
205 xen_drm_front_shbuf_get_dir_start(shbuf); 190 xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
206 req->op.dbuf_create.buffer_sz = size; 191 req->op.dbuf_create.buffer_sz = size;
207 req->op.dbuf_create.dbuf_cookie = dbuf_cookie; 192 req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
208 req->op.dbuf_create.width = width; 193 req->op.dbuf_create.width = width;
@@ -221,7 +206,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
221 if (ret < 0) 206 if (ret < 0)
222 goto fail; 207 goto fail;
223 208
224 ret = xen_drm_front_shbuf_map(shbuf); 209 ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
225 if (ret < 0) 210 if (ret < 0)
226 goto fail; 211 goto fail;
227 212
@@ -230,6 +215,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
230 215
231fail: 216fail:
232 mutex_unlock(&evtchnl->u.req.req_io_lock); 217 mutex_unlock(&evtchnl->u.req.req_io_lock);
218fail_shbuf_alloc:
233 dbuf_free(&front_info->dbuf_list, dbuf_cookie); 219 dbuf_free(&front_info->dbuf_list, dbuf_cookie);
234 return ret; 220 return ret;
235} 221}
@@ -358,7 +344,6 @@ int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
358 if (unlikely(conn_idx >= front_info->num_evt_pairs)) 344 if (unlikely(conn_idx >= front_info->num_evt_pairs))
359 return -EINVAL; 345 return -EINVAL;
360 346
361 dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
362 evtchnl = &front_info->evt_pairs[conn_idx].req; 347 evtchnl = &front_info->evt_pairs[conn_idx].req;
363 348
364 mutex_lock(&evtchnl->u.req.req_io_lock); 349 mutex_lock(&evtchnl->u.req.req_io_lock);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 47ff019d3aef..28bc501af450 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -22,7 +22,6 @@
22#include <xen/balloon.h> 22#include <xen/balloon.h>
23 23
24#include "xen_drm_front.h" 24#include "xen_drm_front.h"
25#include "xen_drm_front_shbuf.h"
26 25
27struct xen_gem_object { 26struct xen_gem_object {
28 struct drm_gem_object base; 27 struct drm_gem_object base;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
deleted file mode 100644
index d333b67cc1a0..000000000000
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
+++ /dev/null
@@ -1,414 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#include <drm/drmP.h>
12
13#if defined(CONFIG_X86)
14#include <drm/drm_cache.h>
15#endif
16#include <linux/errno.h>
17#include <linux/mm.h>
18
19#include <asm/xen/hypervisor.h>
20#include <xen/balloon.h>
21#include <xen/xen.h>
22#include <xen/xenbus.h>
23#include <xen/interface/io/ring.h>
24#include <xen/interface/io/displif.h>
25
26#include "xen_drm_front.h"
27#include "xen_drm_front_shbuf.h"
28
29struct xen_drm_front_shbuf_ops {
30 /*
31 * Calculate number of grefs required to handle this buffer,
32 * e.g. if grefs are required for page directory only or the buffer
33 * pages as well.
34 */
35 void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
36 /* Fill page directory according to para-virtual display protocol. */
37 void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
38 /* Claim grant references for the pages of the buffer. */
39 int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
40 grant_ref_t *priv_gref_head, int gref_idx);
41 /* Map grant references of the buffer. */
42 int (*map)(struct xen_drm_front_shbuf *buf);
43 /* Unmap grant references of the buffer. */
44 int (*unmap)(struct xen_drm_front_shbuf *buf);
45};
46
47grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
48{
49 if (!buf->grefs)
50 return GRANT_INVALID_REF;
51
52 return buf->grefs[0];
53}
54
55int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
56{
57 if (buf->ops->map)
58 return buf->ops->map(buf);
59
60 /* no need to map own grant references */
61 return 0;
62}
63
64int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
65{
66 if (buf->ops->unmap)
67 return buf->ops->unmap(buf);
68
69 /* no need to unmap own grant references */
70 return 0;
71}
72
73void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
74{
75#if defined(CONFIG_X86)
76 drm_clflush_pages(buf->pages, buf->num_pages);
77#endif
78}
79
80void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
81{
82 if (buf->grefs) {
83 int i;
84
85 for (i = 0; i < buf->num_grefs; i++)
86 if (buf->grefs[i] != GRANT_INVALID_REF)
87 gnttab_end_foreign_access(buf->grefs[i],
88 0, 0UL);
89 }
90 kfree(buf->grefs);
91 kfree(buf->directory);
92 kfree(buf);
93}
94
95/*
96 * number of grefs a page can hold with respect to the
97 * struct xendispl_page_directory header
98 */
99#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
100 offsetof(struct xendispl_page_directory, gref)) / \
101 sizeof(grant_ref_t))
102
103static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
104{
105 /* number of pages the page directory consumes itself */
106 return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
107}
108
109static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
110{
111 /* only for pages the page directory consumes itself */
112 buf->num_grefs = get_num_pages_dir(buf);
113}
114
115static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
116{
117 /*
118 * number of pages the page directory consumes itself
119 * plus grefs for the buffer pages
120 */
121 buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
122}
123
124#define xen_page_to_vaddr(page) \
125 ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
126
127static int backend_unmap(struct xen_drm_front_shbuf *buf)
128{
129 struct gnttab_unmap_grant_ref *unmap_ops;
130 int i, ret;
131
132 if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
133 return 0;
134
135 unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
136 GFP_KERNEL);
137 if (!unmap_ops) {
138 DRM_ERROR("Failed to get memory while unmapping\n");
139 return -ENOMEM;
140 }
141
142 for (i = 0; i < buf->num_pages; i++) {
143 phys_addr_t addr;
144
145 addr = xen_page_to_vaddr(buf->pages[i]);
146 gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
147 buf->backend_map_handles[i]);
148 }
149
150 ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
151 buf->num_pages);
152
153 for (i = 0; i < buf->num_pages; i++) {
154 if (unlikely(unmap_ops[i].status != GNTST_okay))
155 DRM_ERROR("Failed to unmap page %d: %d\n",
156 i, unmap_ops[i].status);
157 }
158
159 if (ret)
160 DRM_ERROR("Failed to unmap grant references, ret %d", ret);
161
162 kfree(unmap_ops);
163 kfree(buf->backend_map_handles);
164 buf->backend_map_handles = NULL;
165 return ret;
166}
167
168static int backend_map(struct xen_drm_front_shbuf *buf)
169{
170 struct gnttab_map_grant_ref *map_ops = NULL;
171 unsigned char *ptr;
172 int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
173
174 map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
175 if (!map_ops)
176 return -ENOMEM;
177
178 buf->backend_map_handles = kcalloc(buf->num_pages,
179 sizeof(*buf->backend_map_handles),
180 GFP_KERNEL);
181 if (!buf->backend_map_handles) {
182 kfree(map_ops);
183 return -ENOMEM;
184 }
185
186 /*
187 * read page directory to get grefs from the backend: for external
188 * buffer we only allocate buf->grefs for the page directory,
189 * so buf->num_grefs has number of pages in the page directory itself
190 */
191 ptr = buf->directory;
192 grefs_left = buf->num_pages;
193 cur_page = 0;
194 for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
195 struct xendispl_page_directory *page_dir =
196 (struct xendispl_page_directory *)ptr;
197 int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
198
199 if (to_copy > grefs_left)
200 to_copy = grefs_left;
201
202 for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
203 phys_addr_t addr;
204
205 addr = xen_page_to_vaddr(buf->pages[cur_page]);
206 gnttab_set_map_op(&map_ops[cur_page], addr,
207 GNTMAP_host_map,
208 page_dir->gref[cur_gref],
209 buf->xb_dev->otherend_id);
210 cur_page++;
211 }
212
213 grefs_left -= to_copy;
214 ptr += PAGE_SIZE;
215 }
216 ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
217
218 /* save handles even if error, so we can unmap */
219 for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
220 buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
221 if (unlikely(map_ops[cur_page].status != GNTST_okay))
222 DRM_ERROR("Failed to map page %d: %d\n",
223 cur_page, map_ops[cur_page].status);
224 }
225
226 if (ret) {
227 DRM_ERROR("Failed to map grant references, ret %d", ret);
228 backend_unmap(buf);
229 }
230
231 kfree(map_ops);
232 return ret;
233}
234
235static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
236{
237 struct xendispl_page_directory *page_dir;
238 unsigned char *ptr;
239 int i, num_pages_dir;
240
241 ptr = buf->directory;
242 num_pages_dir = get_num_pages_dir(buf);
243
244 /* fill only grefs for the page directory itself */
245 for (i = 0; i < num_pages_dir - 1; i++) {
246 page_dir = (struct xendispl_page_directory *)ptr;
247
248 page_dir->gref_dir_next_page = buf->grefs[i + 1];
249 ptr += PAGE_SIZE;
250 }
251 /* last page must say there is no more pages */
252 page_dir = (struct xendispl_page_directory *)ptr;
253 page_dir->gref_dir_next_page = GRANT_INVALID_REF;
254}
255
256static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
257{
258 unsigned char *ptr;
259 int cur_gref, grefs_left, to_copy, i, num_pages_dir;
260
261 ptr = buf->directory;
262 num_pages_dir = get_num_pages_dir(buf);
263
264 /*
265 * while copying, skip grefs at start, they are for pages
266 * granted for the page directory itself
267 */
268 cur_gref = num_pages_dir;
269 grefs_left = buf->num_pages;
270 for (i = 0; i < num_pages_dir; i++) {
271 struct xendispl_page_directory *page_dir =
272 (struct xendispl_page_directory *)ptr;
273
274 if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
275 to_copy = grefs_left;
276 page_dir->gref_dir_next_page = GRANT_INVALID_REF;
277 } else {
278 to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
279 page_dir->gref_dir_next_page = buf->grefs[i + 1];
280 }
281 memcpy(&page_dir->gref, &buf->grefs[cur_gref],
282 to_copy * sizeof(grant_ref_t));
283 ptr += PAGE_SIZE;
284 grefs_left -= to_copy;
285 cur_gref += to_copy;
286 }
287}
288
289static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
290 grant_ref_t *priv_gref_head,
291 int gref_idx)
292{
293 int i, cur_ref, otherend_id;
294
295 otherend_id = buf->xb_dev->otherend_id;
296 for (i = 0; i < buf->num_pages; i++) {
297 cur_ref = gnttab_claim_grant_reference(priv_gref_head);
298 if (cur_ref < 0)
299 return cur_ref;
300
301 gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
302 xen_page_to_gfn(buf->pages[i]),
303 0);
304 buf->grefs[gref_idx++] = cur_ref;
305 }
306 return 0;
307}
308
309static int grant_references(struct xen_drm_front_shbuf *buf)
310{
311 grant_ref_t priv_gref_head;
312 int ret, i, j, cur_ref;
313 int otherend_id, num_pages_dir;
314
315 ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
316 if (ret < 0) {
317 DRM_ERROR("Cannot allocate grant references\n");
318 return ret;
319 }
320
321 otherend_id = buf->xb_dev->otherend_id;
322 j = 0;
323 num_pages_dir = get_num_pages_dir(buf);
324 for (i = 0; i < num_pages_dir; i++) {
325 unsigned long frame;
326
327 cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
328 if (cur_ref < 0)
329 return cur_ref;
330
331 frame = xen_page_to_gfn(virt_to_page(buf->directory +
332 PAGE_SIZE * i));
333 gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
334 buf->grefs[j++] = cur_ref;
335 }
336
337 if (buf->ops->grant_refs_for_buffer) {
338 ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
339 if (ret)
340 return ret;
341 }
342
343 gnttab_free_grant_references(priv_gref_head);
344 return 0;
345}
346
347static int alloc_storage(struct xen_drm_front_shbuf *buf)
348{
349 buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
350 if (!buf->grefs)
351 return -ENOMEM;
352
353 buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
354 if (!buf->directory)
355 return -ENOMEM;
356
357 return 0;
358}
359
360/*
361 * For be allocated buffers we don't need grant_refs_for_buffer as those
362 * grant references are allocated at backend side
363 */
364static const struct xen_drm_front_shbuf_ops backend_ops = {
365 .calc_num_grefs = backend_calc_num_grefs,
366 .fill_page_dir = backend_fill_page_dir,
367 .map = backend_map,
368 .unmap = backend_unmap
369};
370
371/* For locally granted references we do not need to map/unmap the references */
372static const struct xen_drm_front_shbuf_ops local_ops = {
373 .calc_num_grefs = guest_calc_num_grefs,
374 .fill_page_dir = guest_fill_page_dir,
375 .grant_refs_for_buffer = guest_grant_refs_for_buffer,
376};
377
378struct xen_drm_front_shbuf *
379xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
380{
381 struct xen_drm_front_shbuf *buf;
382 int ret;
383
384 buf = kzalloc(sizeof(*buf), GFP_KERNEL);
385 if (!buf)
386 return ERR_PTR(-ENOMEM);
387
388 if (cfg->be_alloc)
389 buf->ops = &backend_ops;
390 else
391 buf->ops = &local_ops;
392
393 buf->xb_dev = cfg->xb_dev;
394 buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
395 buf->pages = cfg->pages;
396
397 buf->ops->calc_num_grefs(buf);
398
399 ret = alloc_storage(buf);
400 if (ret)
401 goto fail;
402
403 ret = grant_references(buf);
404 if (ret)
405 goto fail;
406
407 buf->ops->fill_page_dir(buf);
408
409 return buf;
410
411fail:
412 xen_drm_front_shbuf_free(buf);
413 return ERR_PTR(ret);
414}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
deleted file mode 100644
index 7545c692539e..000000000000
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2
3/*
4 * Xen para-virtual DRM device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#ifndef __XEN_DRM_FRONT_SHBUF_H_
12#define __XEN_DRM_FRONT_SHBUF_H_
13
14#include <linux/kernel.h>
15#include <linux/scatterlist.h>
16
17#include <xen/grant_table.h>
18
19struct xen_drm_front_shbuf {
20 /*
21 * number of references granted for the backend use:
22 * - for allocated/imported dma-buf's this holds number of grant
23 * references for the page directory and pages of the buffer
24 * - for the buffer provided by the backend this holds number of
25 * grant references for the page directory as grant references for
26 * the buffer will be provided by the backend
27 */
28 int num_grefs;
29 grant_ref_t *grefs;
30 unsigned char *directory;
31
32 int num_pages;
33 struct page **pages;
34
35 struct xenbus_device *xb_dev;
36
37 /* these are the ops used internally depending on be_alloc mode */
38 const struct xen_drm_front_shbuf_ops *ops;
39
40 /* Xen map handles for the buffer allocated by the backend */
41 grant_handle_t *backend_map_handles;
42};
43
44struct xen_drm_front_shbuf_cfg {
45 struct xenbus_device *xb_dev;
46 size_t size;
47 struct page **pages;
48 bool be_alloc;
49};
50
51struct xen_drm_front_shbuf *
52xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg);
53
54grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf);
55
56int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf);
57
58int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf);
59
60void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf);
61
62void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf);
63
64#endif /* __XEN_DRM_FRONT_SHBUF_H_ */
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 815b9e9bb975..838b66a9a0e7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -340,4 +340,7 @@ config XEN_SYMS
340config XEN_HAVE_VPMU 340config XEN_HAVE_VPMU
341 bool 341 bool
342 342
343config XEN_FRONT_PGDIR_SHBUF
344 tristate
345
343endmenu 346endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 3e542f60f29f..c48927a58e10 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -44,3 +44,4 @@ xen-gntdev-y := gntdev.o
44xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o 44xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o
45xen-gntalloc-y := gntalloc.o 45xen-gntalloc-y := gntalloc.o
46xen-privcmd-y := privcmd.o privcmd-buf.o 46xen-privcmd-y := privcmd.o privcmd-buf.o
47obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o
diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c
new file mode 100644
index 000000000000..48a658dc7ccf
--- /dev/null
+++ b/drivers/xen/xen-front-pgdir-shbuf.c
@@ -0,0 +1,553 @@
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2
3/*
4 * Xen frontend/backend page directory based shared buffer
5 * helper module.
6 *
7 * Copyright (C) 2018 EPAM Systems Inc.
8 *
9 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
10 */
11
12#include <linux/module.h>
13#include <linux/errno.h>
14#include <linux/mm.h>
15
16#include <asm/xen/hypervisor.h>
17#include <xen/balloon.h>
18#include <xen/xen.h>
19#include <xen/xenbus.h>
20#include <xen/interface/io/ring.h>
21
22#include <xen/xen-front-pgdir-shbuf.h>
23
24#ifndef GRANT_INVALID_REF
25/*
26 * FIXME: usage of grant reference 0 as invalid grant reference:
27 * grant reference 0 is valid, but never exposed to a PV driver,
28 * because of the fact it is already in use/reserved by the PV console.
29 */
30#define GRANT_INVALID_REF 0
31#endif
32
33/**
34 * This structure represents the structure of a shared page
35 * that contains grant references to the pages of the shared
36 * buffer. This structure is common to many Xen para-virtualized
37 * protocols at include/xen/interface/io/
38 */
39struct xen_page_directory {
40 grant_ref_t gref_dir_next_page;
41 grant_ref_t gref[1]; /* Variable length */
42};
43
44/**
45 * Shared buffer ops which are differently implemented
46 * depending on the allocation mode, e.g. if the buffer
47 * is allocated by the corresponding backend or frontend.
48 * Some of the operations are optional and may be left unset.
49 */
50struct xen_front_pgdir_shbuf_ops {
51 /*
52 * Calculate number of grefs required to handle this buffer,
53 * e.g. if grefs are required for page directory only or the buffer
54 * pages as well.
55 */
56 void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);
57
58 /* Fill page directory according to para-virtual display protocol. */
59 void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);
60
61 /* Claim grant references for the pages of the buffer. */
62 int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
63 grant_ref_t *priv_gref_head, int gref_idx);
64
65 /* Map grant references of the buffer. */
66 int (*map)(struct xen_front_pgdir_shbuf *buf);
67
68 /* Unmap grant references of the buffer. */
69 int (*unmap)(struct xen_front_pgdir_shbuf *buf);
70};
71
72/**
73 * Get granted reference to the very first page of the
74 * page directory. Usually this is passed to the backend,
75 * so it can find/fill the grant references to the buffer's
76 * pages.
77 *
78 * \param buf shared buffer whose page directory is of interest.
79 * \return granted reference to the very first page of the
80 * page directory.
81 */
82grant_ref_t
83xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
84{
85 if (!buf->grefs)
86 return GRANT_INVALID_REF;
87
88 return buf->grefs[0];
89}
90EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
91
92/**
93 * Map granted references of the shared buffer.
94 *
95 * Depending on the shared buffer mode of allocation
96 * (be_alloc flag) this can either do nothing (for buffers
97 * shared by the frontend itself) or map the provided granted
98 * references onto the backing storage (buf->pages).
99 *
100 * \param buf shared buffer whose grants are to be mapped.
101 * \return zero on success or a negative number on failure.
102 */
103int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
104{
105 if (buf->ops && buf->ops->map)
106 return buf->ops->map(buf);
107
108 /* No need to map own grant references. */
109 return 0;
110}
111EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
112
113/**
114 * Unmap granted references of the shared buffer.
115 *
116 * Depending on the shared buffer mode of allocation
117 * (be_alloc flag) this can either do nothing (for buffers
118 * shared by the frontend itself) or unmap the provided granted
119 * references.
120 *
121 * \param buf shared buffer whose grants are to be unmapped.
122 * \return zero on success or a negative number on failure.
123 */
124int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
125{
126 if (buf->ops && buf->ops->unmap)
127 return buf->ops->unmap(buf);
128
129 /* No need to unmap own grant references. */
130 return 0;
131}
132EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);
133
134/**
135 * Free all the resources of the shared buffer.
136 *
137 * \param buf shared buffer whose resources are to be freed.
138 */
139void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
140{
141 if (buf->grefs) {
142 int i;
143
144 for (i = 0; i < buf->num_grefs; i++)
145 if (buf->grefs[i] != GRANT_INVALID_REF)
146 gnttab_end_foreign_access(buf->grefs[i],
147 0, 0UL);
148 }
149 kfree(buf->grefs);
150 kfree(buf->directory);
151}
152EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);
153
154/*
155 * Number of grefs a page can hold with respect to the
156 * struct xen_page_directory header.
157 */
158#define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
159 offsetof(struct xen_page_directory, \
160 gref)) / sizeof(grant_ref_t))
161
162/**
163 * Get the number of pages the page directory consumes itself.
164 *
165 * \param buf shared buffer.
166 */
167static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
168{
169 return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
170}
171
172/**
173 * Calculate the number of grant references needed to share the buffer
174 * and its pages when backend allocates the buffer.
175 *
176 * \param buf shared buffer.
177 */
178static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
179{
180 /* Only for pages the page directory consumes itself. */
181 buf->num_grefs = get_num_pages_dir(buf);
182}
183
184/**
185 * Calculate the number of grant references needed to share the buffer
186 * and its pages when frontend allocates the buffer.
187 *
188 * \param buf shared buffer.
189 */
190static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
191{
192 /*
193 * Number of pages the page directory consumes itself
194 * plus grefs for the buffer pages.
195 */
196 buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
197}
198
199#define xen_page_to_vaddr(page) \
200 ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
201
202/**
203 * Unmap the buffer previously mapped with grant references
204 * provided by the backend.
205 *
206 * \param buf shared buffer.
207 * \return zero on success or a negative number on failure.
208 */
209static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
210{
211 struct gnttab_unmap_grant_ref *unmap_ops;
212 int i, ret;
213
214 if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
215 return 0;
216
217 unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
218 GFP_KERNEL);
219 if (!unmap_ops)
220 return -ENOMEM;
221
222 for (i = 0; i < buf->num_pages; i++) {
223 phys_addr_t addr;
224
225 addr = xen_page_to_vaddr(buf->pages[i]);
226 gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
227 buf->backend_map_handles[i]);
228 }
229
230 ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
231 buf->num_pages);
232
233 for (i = 0; i < buf->num_pages; i++) {
234 if (unlikely(unmap_ops[i].status != GNTST_okay))
235 dev_err(&buf->xb_dev->dev,
236 "Failed to unmap page %d: %d\n",
237 i, unmap_ops[i].status);
238 }
239
240 if (ret)
241 dev_err(&buf->xb_dev->dev,
242 "Failed to unmap grant references, ret %d", ret);
243
244 kfree(unmap_ops);
245 kfree(buf->backend_map_handles);
246 buf->backend_map_handles = NULL;
247 return ret;
248}
249
250/**
251 * Map the buffer with grant references provided by the backend.
252 *
253 * \param buf shared buffer.
254 * \return zero on success or a negative number on failure.
255 */
256static int backend_map(struct xen_front_pgdir_shbuf *buf)
257{
258 struct gnttab_map_grant_ref *map_ops = NULL;
259 unsigned char *ptr;
260 int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
261
262 map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
263 if (!map_ops)
264 return -ENOMEM;
265
266 buf->backend_map_handles = kcalloc(buf->num_pages,
267 sizeof(*buf->backend_map_handles),
268 GFP_KERNEL);
269 if (!buf->backend_map_handles) {
270 kfree(map_ops);
271 return -ENOMEM;
272 }
273
274 /*
275 * Read page directory to get grefs from the backend: for external
276 * buffer we only allocate buf->grefs for the page directory,
277 * so buf->num_grefs has number of pages in the page directory itself.
278 */
279 ptr = buf->directory;
280 grefs_left = buf->num_pages;
281 cur_page = 0;
282 for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
283 struct xen_page_directory *page_dir =
284 (struct xen_page_directory *)ptr;
285 int to_copy = XEN_NUM_GREFS_PER_PAGE;
286
287 if (to_copy > grefs_left)
288 to_copy = grefs_left;
289
290 for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
291 phys_addr_t addr;
292
293 addr = xen_page_to_vaddr(buf->pages[cur_page]);
294 gnttab_set_map_op(&map_ops[cur_page], addr,
295 GNTMAP_host_map,
296 page_dir->gref[cur_gref],
297 buf->xb_dev->otherend_id);
298 cur_page++;
299 }
300
301 grefs_left -= to_copy;
302 ptr += PAGE_SIZE;
303 }
304 ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
305
306 /* Save handles even if error, so we can unmap. */
307 for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
308 buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
309 if (unlikely(map_ops[cur_page].status != GNTST_okay))
310 dev_err(&buf->xb_dev->dev,
311 "Failed to map page %d: %d\n",
312 cur_page, map_ops[cur_page].status);
313 }
314
315 if (ret) {
316 dev_err(&buf->xb_dev->dev,
317 "Failed to map grant references, ret %d", ret);
318 backend_unmap(buf);
319 }
320
321 kfree(map_ops);
322 return ret;
323}
324
325/**
326 * Fill page directory with grant references to the pages of the
327 * page directory itself.
328 *
329 * The grant references to the buffer pages are provided by the
330 * backend in this case.
331 *
332 * \param buf shared buffer.
333 */
334static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
335{
336 struct xen_page_directory *page_dir;
337 unsigned char *ptr;
338 int i, num_pages_dir;
339
340 ptr = buf->directory;
341 num_pages_dir = get_num_pages_dir(buf);
342
343 /* Fill only grefs for the page directory itself. */
344 for (i = 0; i < num_pages_dir - 1; i++) {
345 page_dir = (struct xen_page_directory *)ptr;
346
347 page_dir->gref_dir_next_page = buf->grefs[i + 1];
348 ptr += PAGE_SIZE;
349 }
350	/* Last page must say there are no more pages. */
351 page_dir = (struct xen_page_directory *)ptr;
352 page_dir->gref_dir_next_page = GRANT_INVALID_REF;
353}
354
355/**
356 * Fill page directory with grant references to the pages of the
357 * page directory and the buffer we share with the backend.
358 *
359 * \param buf shared buffer.
360 */
361static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
362{
363 unsigned char *ptr;
364 int cur_gref, grefs_left, to_copy, i, num_pages_dir;
365
366 ptr = buf->directory;
367 num_pages_dir = get_num_pages_dir(buf);
368
369 /*
370 * While copying, skip grefs at start, they are for pages
371 * granted for the page directory itself.
372 */
373 cur_gref = num_pages_dir;
374 grefs_left = buf->num_pages;
375 for (i = 0; i < num_pages_dir; i++) {
376 struct xen_page_directory *page_dir =
377 (struct xen_page_directory *)ptr;
378
379 if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
380 to_copy = grefs_left;
381 page_dir->gref_dir_next_page = GRANT_INVALID_REF;
382 } else {
383 to_copy = XEN_NUM_GREFS_PER_PAGE;
384 page_dir->gref_dir_next_page = buf->grefs[i + 1];
385 }
386 memcpy(&page_dir->gref, &buf->grefs[cur_gref],
387 to_copy * sizeof(grant_ref_t));
388 ptr += PAGE_SIZE;
389 grefs_left -= to_copy;
390 cur_gref += to_copy;
391 }
392}
393
394/**
395 * Grant references to the frontend's buffer pages.
396 *
397 * These will be shared with the backend, so it can
398 * access the buffer's data.
399 *
400 * \param buf shared buffer.
401 * \return zero on success or a negative number on failure.
402 */
403static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
404 grant_ref_t *priv_gref_head,
405 int gref_idx)
406{
407 int i, cur_ref, otherend_id;
408
409 otherend_id = buf->xb_dev->otherend_id;
410 for (i = 0; i < buf->num_pages; i++) {
411 cur_ref = gnttab_claim_grant_reference(priv_gref_head);
412 if (cur_ref < 0)
413 return cur_ref;
414
415 gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
416 xen_page_to_gfn(buf->pages[i]),
417 0);
418 buf->grefs[gref_idx++] = cur_ref;
419 }
420 return 0;
421}
422
423/**
424 * Grant all the references needed to share the buffer.
425 *
426 * Grant references to the page directory pages and, if
427 * needed, also to the pages of the shared buffer data.
428 *
429 * \param buf shared buffer.
430 * \return zero on success or a negative number on failure.
431 */
432static int grant_references(struct xen_front_pgdir_shbuf *buf)
433{
434 grant_ref_t priv_gref_head;
435 int ret, i, j, cur_ref;
436 int otherend_id, num_pages_dir;
437
438 ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
439 if (ret < 0) {
440 dev_err(&buf->xb_dev->dev,
441 "Cannot allocate grant references\n");
442 return ret;
443 }
444
445 otherend_id = buf->xb_dev->otherend_id;
446 j = 0;
447 num_pages_dir = get_num_pages_dir(buf);
448 for (i = 0; i < num_pages_dir; i++) {
449 unsigned long frame;
450
451 cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
452 if (cur_ref < 0)
453 return cur_ref;
454
455 frame = xen_page_to_gfn(virt_to_page(buf->directory +
456 PAGE_SIZE * i));
457 gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
458 buf->grefs[j++] = cur_ref;
459 }
460
461 if (buf->ops->grant_refs_for_buffer) {
462 ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
463 if (ret)
464 return ret;
465 }
466
467 gnttab_free_grant_references(priv_gref_head);
468 return 0;
469}
470
471/**
472 * Allocate all required structures to manage the shared buffer.
473 *
474 * \param buf shared buffer.
475 * \return zero on success or a negative number on failure.
476 */
477static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
478{
479 buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
480 if (!buf->grefs)
481 return -ENOMEM;
482
483 buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
484 if (!buf->directory)
485 return -ENOMEM;
486
487 return 0;
488}
489
490/*
491 * For backend allocated buffers we don't need grant_refs_for_buffer
492 * as those grant references are allocated at backend side.
493 */
494static const struct xen_front_pgdir_shbuf_ops backend_ops = {
495 .calc_num_grefs = backend_calc_num_grefs,
496 .fill_page_dir = backend_fill_page_dir,
497 .map = backend_map,
498 .unmap = backend_unmap
499};
500
501/*
502 * For locally granted references we do not need to map/unmap
503 * the references.
504 */
505static const struct xen_front_pgdir_shbuf_ops local_ops = {
506 .calc_num_grefs = guest_calc_num_grefs,
507 .fill_page_dir = guest_fill_page_dir,
508 .grant_refs_for_buffer = guest_grant_refs_for_buffer,
509};
510
511/**
512 * Allocate a new instance of a shared buffer.
513 *
514 * \param cfg configuration to be used while allocating a new shared buffer.
515 * \return zero on success or a negative number on failure.
516 */
517int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg)
518{
519 struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
520 int ret;
521
522 if (cfg->be_alloc)
523 buf->ops = &backend_ops;
524 else
525 buf->ops = &local_ops;
526 buf->xb_dev = cfg->xb_dev;
527 buf->num_pages = cfg->num_pages;
528 buf->pages = cfg->pages;
529
530 buf->ops->calc_num_grefs(buf);
531
532 ret = alloc_storage(buf);
533 if (ret)
534 goto fail;
535
536 ret = grant_references(buf);
537 if (ret)
538 goto fail;
539
540 buf->ops->fill_page_dir(buf);
541
542 return 0;
543
544fail:
545 xen_front_pgdir_shbuf_free(buf);
546 return ret;
547}
548EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_alloc);
549
550MODULE_DESCRIPTION("Xen frontend/backend page directory based "
551 "shared buffer handling");
552MODULE_AUTHOR("Oleksandr Andrushchenko");
553MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 59661db144e5..097410a7cdb7 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -106,7 +106,8 @@ static void pcistub_device_release(struct kref *kref)
106 * is called from "unbind" which takes a device_lock mutex. 106 * is called from "unbind" which takes a device_lock mutex.
107 */ 107 */
108 __pci_reset_function_locked(dev); 108 __pci_reset_function_locked(dev);
109 if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state)) 109 if (dev_data &&
110 pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
110 dev_info(&dev->dev, "Could not reload PCI state\n"); 111 dev_info(&dev->dev, "Could not reload PCI state\n");
111 else 112 else
112 pci_restore_state(dev); 113 pci_restore_state(dev);
diff --git a/include/xen/interface/hvm/start_info.h b/include/xen/interface/hvm/start_info.h
index 648415976ead..50af9ea2ff1e 100644
--- a/include/xen/interface/hvm/start_info.h
+++ b/include/xen/interface/hvm/start_info.h
@@ -33,7 +33,7 @@
33 * | magic | Contains the magic value XEN_HVM_START_MAGIC_VALUE 33 * | magic | Contains the magic value XEN_HVM_START_MAGIC_VALUE
34 * | | ("xEn3" with the 0x80 bit of the "E" set). 34 * | | ("xEn3" with the 0x80 bit of the "E" set).
35 * 4 +----------------+ 35 * 4 +----------------+
36 * | version | Version of this structure. Current version is 0. New 36 * | version | Version of this structure. Current version is 1. New
37 * | | versions are guaranteed to be backwards-compatible. 37 * | | versions are guaranteed to be backwards-compatible.
38 * 8 +----------------+ 38 * 8 +----------------+
39 * | flags | SIF_xxx flags. 39 * | flags | SIF_xxx flags.
@@ -48,6 +48,15 @@
48 * 32 +----------------+ 48 * 32 +----------------+
49 * | rsdp_paddr | Physical address of the RSDP ACPI data structure. 49 * | rsdp_paddr | Physical address of the RSDP ACPI data structure.
50 * 40 +----------------+ 50 * 40 +----------------+
51 * | memmap_paddr | Physical address of the (optional) memory map. Only
52 * | | present in version 1 and newer of the structure.
53 * 48 +----------------+
54 * | memmap_entries | Number of entries in the memory map table. Zero
55 * | | if there is no memory map being provided. Only
56 * | | present in version 1 and newer of the structure.
57 * 52 +----------------+
58 * | reserved | Version 1 and newer only.
59 * 56 +----------------+
51 * 60 *
52 * The layout of each entry in the module structure is the following: 61 * The layout of each entry in the module structure is the following:
53 * 62 *
@@ -62,14 +71,52 @@
62 * | reserved | 71 * | reserved |
63 * 32 +----------------+ 72 * 32 +----------------+
64 * 73 *
74 * The layout of each entry in the memory map table is as follows:
75 *
76 * 0 +----------------+
77 * | addr | Base address
78 * 8 +----------------+
79 * | size | Size of mapping in bytes
80 * 16 +----------------+
81 * | type | Type of mapping as defined between the hypervisor
82 * | | and guest. See XEN_HVM_MEMMAP_TYPE_* values below.
83 * 20 +----------------|
84 * | reserved |
85 * 24 +----------------+
86 *
65 * The address and sizes are always a 64bit little endian unsigned integer. 87 * The address and sizes are always a 64bit little endian unsigned integer.
66 * 88 *
67 * NB: Xen on x86 will always try to place all the data below the 4GiB 89 * NB: Xen on x86 will always try to place all the data below the 4GiB
68 * boundary. 90 * boundary.
91 *
92 * Version numbers of the hvm_start_info structure have evolved like this:
93 *
94 * Version 0: Initial implementation.
95 *
96 * Version 1: Added the memmap_paddr/memmap_entries fields (plus 4 bytes of
97 * padding) to the end of the hvm_start_info struct. These new
98 * fields can be used to pass a memory map to the guest. The
99 * memory map is optional and so guests that understand version 1
100 * of the structure must check that memmap_entries is non-zero
101 * before trying to read the memory map.
69 */ 102 */
70#define XEN_HVM_START_MAGIC_VALUE 0x336ec578 103#define XEN_HVM_START_MAGIC_VALUE 0x336ec578
71 104
72/* 105/*
106 * The values used in the type field of the memory map table entries are
107 * defined below and match the Address Range Types as defined in the "System
108 * Address Map Interfaces" section of the ACPI Specification. Please refer to
109 * section 15 in version 6.2 of the ACPI spec: http://uefi.org/specifications
110 */
111#define XEN_HVM_MEMMAP_TYPE_RAM 1
112#define XEN_HVM_MEMMAP_TYPE_RESERVED 2
113#define XEN_HVM_MEMMAP_TYPE_ACPI 3
114#define XEN_HVM_MEMMAP_TYPE_NVS 4
115#define XEN_HVM_MEMMAP_TYPE_UNUSABLE 5
116#define XEN_HVM_MEMMAP_TYPE_DISABLED 6
117#define XEN_HVM_MEMMAP_TYPE_PMEM 7
118
119/*
73 * C representation of the x86/HVM start info layout. 120 * C representation of the x86/HVM start info layout.
74 * 121 *
75 * The canonical definition of this layout is above, this is just a way to 122 * The canonical definition of this layout is above, this is just a way to
@@ -86,6 +133,13 @@ struct hvm_start_info {
86 uint64_t cmdline_paddr; /* Physical address of the command line. */ 133 uint64_t cmdline_paddr; /* Physical address of the command line. */
87 uint64_t rsdp_paddr; /* Physical address of the RSDP ACPI data */ 134 uint64_t rsdp_paddr; /* Physical address of the RSDP ACPI data */
88 /* structure. */ 135 /* structure. */
136 /* All following fields only present in version 1 and newer */
137 uint64_t memmap_paddr; /* Physical address of an array of */
138 /* hvm_memmap_table_entry. */
139 uint32_t memmap_entries; /* Number of entries in the memmap table. */
140 /* Value will be zero if there is no memory */
141 /* map being provided. */
142 uint32_t reserved; /* Must be zero. */
89}; 143};
90 144
91struct hvm_modlist_entry { 145struct hvm_modlist_entry {
@@ -95,4 +149,11 @@ struct hvm_modlist_entry {
95 uint64_t reserved; 149 uint64_t reserved;
96}; 150};
97 151
152struct hvm_memmap_table_entry {
153 uint64_t addr; /* Base address of the memory region */
154 uint64_t size; /* Size of the memory region in bytes */
155 uint32_t type; /* Mapping type */
156 uint32_t reserved; /* Must be zero for Version 1. */
157};
158
98#endif /* __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__ */ 159#endif /* __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__ */
diff --git a/include/xen/xen-front-pgdir-shbuf.h b/include/xen/xen-front-pgdir-shbuf.h
new file mode 100644
index 000000000000..150ef7ec51ec
--- /dev/null
+++ b/include/xen/xen-front-pgdir-shbuf.h
@@ -0,0 +1,89 @@
1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2
3/*
4 * Xen frontend/backend page directory based shared buffer
5 * helper module.
6 *
7 * Copyright (C) 2018 EPAM Systems Inc.
8 *
9 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
10 */
11
12#ifndef __XEN_FRONT_PGDIR_SHBUF_H_
13#define __XEN_FRONT_PGDIR_SHBUF_H_
14
15#include <linux/kernel.h>
16
17#include <xen/grant_table.h>
18
19struct xen_front_pgdir_shbuf_ops;
20
21struct xen_front_pgdir_shbuf {
22 /*
23 * Number of references granted for the backend use:
24 *
25 * - for frontend allocated/imported buffers this holds the number
26 * of grant references for the page directory and the pages
27 * of the buffer
28 *
29 * - for the buffer provided by the backend this only holds the number
30 * of grant references for the page directory itself as grant
31 * references for the buffer will be provided by the backend.
32 */
33 int num_grefs;
34 grant_ref_t *grefs;
35 /* Page directory backing storage. */
36 u8 *directory;
37
38 /*
39 * Number of pages for the shared buffer itself (excluding the page
40 * directory).
41 */
42 int num_pages;
43 /*
44 * Backing storage of the shared buffer: these are the pages being
45 * shared.
46 */
47 struct page **pages;
48
49 struct xenbus_device *xb_dev;
50
51 /* These are the ops used internally depending on be_alloc mode. */
52 const struct xen_front_pgdir_shbuf_ops *ops;
53
54 /* Xen map handles for the buffer allocated by the backend. */
55 grant_handle_t *backend_map_handles;
56};
57
58struct xen_front_pgdir_shbuf_cfg {
59 struct xenbus_device *xb_dev;
60
61 /* Number of pages of the buffer backing storage. */
62 int num_pages;
63 /* Pages of the buffer to be shared. */
64 struct page **pages;
65
66 /*
67 * This is allocated outside because there are use-cases when
68 * the buffer structure is allocated as a part of a bigger one.
69 */
70 struct xen_front_pgdir_shbuf *pgdir;
71 /*
72 * Mode of grant reference sharing: if set then backend will share
73 * grant references to the buffer with the frontend.
74 */
75 int be_alloc;
76};
77
78int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg);
79
80grant_ref_t
81xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf);
82
83int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf);
84
85int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf);
86
87void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf);
88
89#endif /* __XEN_FRONT_PGDIR_SHBUF_H_ */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index d7a2678da77f..0e2156786ad2 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -29,6 +29,9 @@ extern bool xen_pvh;
29 29
30extern uint32_t xen_start_flags; 30extern uint32_t xen_start_flags;
31 31
32#include <xen/interface/hvm/start_info.h>
33extern struct hvm_start_info pvh_start_info;
34
32#ifdef CONFIG_XEN_DOM0 35#ifdef CONFIG_XEN_DOM0
33#include <xen/interface/xen.h> 36#include <xen/interface/xen.h>
34#include <asm/xen/hypervisor.h> 37#include <asm/xen/hypervisor.h>
diff --git a/sound/xen/Kconfig b/sound/xen/Kconfig
index 4f1fceea82d2..e4d7beb4df1c 100644
--- a/sound/xen/Kconfig
+++ b/sound/xen/Kconfig
@@ -5,6 +5,7 @@ config SND_XEN_FRONTEND
5 depends on XEN 5 depends on XEN
6 select SND_PCM 6 select SND_PCM
7 select XEN_XENBUS_FRONTEND 7 select XEN_XENBUS_FRONTEND
8 select XEN_FRONT_PGDIR_SHBUF
8 help 9 help
9 Choose this option if you want to enable a para-virtualized 10 Choose this option if you want to enable a para-virtualized
10 frontend sound driver for Xen guest OSes. 11 frontend sound driver for Xen guest OSes.
diff --git a/sound/xen/Makefile b/sound/xen/Makefile
index 1e6470ecc2f2..24031775b715 100644
--- a/sound/xen/Makefile
+++ b/sound/xen/Makefile
@@ -3,7 +3,6 @@
3snd_xen_front-objs := xen_snd_front.o \ 3snd_xen_front-objs := xen_snd_front.o \
4 xen_snd_front_cfg.o \ 4 xen_snd_front_cfg.o \
5 xen_snd_front_evtchnl.o \ 5 xen_snd_front_evtchnl.o \
6 xen_snd_front_shbuf.o \
7 xen_snd_front_alsa.o 6 xen_snd_front_alsa.o
8 7
9obj-$(CONFIG_SND_XEN_FRONTEND) += snd_xen_front.o 8obj-$(CONFIG_SND_XEN_FRONTEND) += snd_xen_front.o
diff --git a/sound/xen/xen_snd_front.c b/sound/xen/xen_snd_front.c
index b089b13b5160..a9e5c2cd7698 100644
--- a/sound/xen/xen_snd_front.c
+++ b/sound/xen/xen_snd_front.c
@@ -16,12 +16,12 @@
16#include <xen/xen.h> 16#include <xen/xen.h>
17#include <xen/xenbus.h> 17#include <xen/xenbus.h>
18 18
19#include <xen/xen-front-pgdir-shbuf.h>
19#include <xen/interface/io/sndif.h> 20#include <xen/interface/io/sndif.h>
20 21
21#include "xen_snd_front.h" 22#include "xen_snd_front.h"
22#include "xen_snd_front_alsa.h" 23#include "xen_snd_front_alsa.h"
23#include "xen_snd_front_evtchnl.h" 24#include "xen_snd_front_evtchnl.h"
24#include "xen_snd_front_shbuf.h"
25 25
26static struct xensnd_req * 26static struct xensnd_req *
27be_stream_prepare_req(struct xen_snd_front_evtchnl *evtchnl, u8 operation) 27be_stream_prepare_req(struct xen_snd_front_evtchnl *evtchnl, u8 operation)
@@ -82,7 +82,7 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
82} 82}
83 83
84int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl, 84int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
85 struct xen_snd_front_shbuf *sh_buf, 85 struct xen_front_pgdir_shbuf *shbuf,
86 u8 format, unsigned int channels, 86 u8 format, unsigned int channels,
87 unsigned int rate, u32 buffer_sz, 87 unsigned int rate, u32 buffer_sz,
88 u32 period_sz) 88 u32 period_sz)
@@ -99,7 +99,8 @@ int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
99 req->op.open.pcm_rate = rate; 99 req->op.open.pcm_rate = rate;
100 req->op.open.buffer_sz = buffer_sz; 100 req->op.open.buffer_sz = buffer_sz;
101 req->op.open.period_sz = period_sz; 101 req->op.open.period_sz = period_sz;
102 req->op.open.gref_directory = xen_snd_front_shbuf_get_dir_start(sh_buf); 102 req->op.open.gref_directory =
103 xen_front_pgdir_shbuf_get_dir_start(shbuf);
103 mutex_unlock(&evtchnl->ring_io_lock); 104 mutex_unlock(&evtchnl->ring_io_lock);
104 105
105 ret = be_stream_do_io(evtchnl); 106 ret = be_stream_do_io(evtchnl);
diff --git a/sound/xen/xen_snd_front.h b/sound/xen/xen_snd_front.h
index a2ea2463bcc5..05611f113b94 100644
--- a/sound/xen/xen_snd_front.h
+++ b/sound/xen/xen_snd_front.h
@@ -16,7 +16,7 @@
16struct xen_snd_front_card_info; 16struct xen_snd_front_card_info;
17struct xen_snd_front_evtchnl; 17struct xen_snd_front_evtchnl;
18struct xen_snd_front_evtchnl_pair; 18struct xen_snd_front_evtchnl_pair;
19struct xen_snd_front_shbuf; 19struct xen_front_pgdir_shbuf;
20struct xensnd_query_hw_param; 20struct xensnd_query_hw_param;
21 21
22struct xen_snd_front_info { 22struct xen_snd_front_info {
@@ -35,7 +35,7 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
35 struct xensnd_query_hw_param *hw_param_resp); 35 struct xensnd_query_hw_param *hw_param_resp);
36 36
37int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl, 37int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
38 struct xen_snd_front_shbuf *sh_buf, 38 struct xen_front_pgdir_shbuf *shbuf,
39 u8 format, unsigned int channels, 39 u8 format, unsigned int channels,
40 unsigned int rate, u32 buffer_sz, 40 unsigned int rate, u32 buffer_sz,
41 u32 period_sz); 41 u32 period_sz);
diff --git a/sound/xen/xen_snd_front_alsa.c b/sound/xen/xen_snd_front_alsa.c
index 2cbd9679aca1..a7f413cb704d 100644
--- a/sound/xen/xen_snd_front_alsa.c
+++ b/sound/xen/xen_snd_front_alsa.c
@@ -15,17 +15,24 @@
15#include <sound/pcm_params.h> 15#include <sound/pcm_params.h>
16 16
17#include <xen/xenbus.h> 17#include <xen/xenbus.h>
18#include <xen/xen-front-pgdir-shbuf.h>
18 19
19#include "xen_snd_front.h" 20#include "xen_snd_front.h"
20#include "xen_snd_front_alsa.h" 21#include "xen_snd_front_alsa.h"
21#include "xen_snd_front_cfg.h" 22#include "xen_snd_front_cfg.h"
22#include "xen_snd_front_evtchnl.h" 23#include "xen_snd_front_evtchnl.h"
23#include "xen_snd_front_shbuf.h"
24 24
25struct xen_snd_front_pcm_stream_info { 25struct xen_snd_front_pcm_stream_info {
26 struct xen_snd_front_info *front_info; 26 struct xen_snd_front_info *front_info;
27 struct xen_snd_front_evtchnl_pair *evt_pair; 27 struct xen_snd_front_evtchnl_pair *evt_pair;
28 struct xen_snd_front_shbuf sh_buf; 28
29 /* This is the shared buffer with its backing storage. */
30 struct xen_front_pgdir_shbuf shbuf;
31 u8 *buffer;
32 size_t buffer_sz;
33 int num_pages;
34 struct page **pages;
35
29 int index; 36 int index;
30 37
31 bool is_open; 38 bool is_open;
@@ -214,12 +221,20 @@ static void stream_clear(struct xen_snd_front_pcm_stream_info *stream)
214 stream->out_frames = 0; 221 stream->out_frames = 0;
215 atomic_set(&stream->hw_ptr, 0); 222 atomic_set(&stream->hw_ptr, 0);
216 xen_snd_front_evtchnl_pair_clear(stream->evt_pair); 223 xen_snd_front_evtchnl_pair_clear(stream->evt_pair);
217 xen_snd_front_shbuf_clear(&stream->sh_buf); 224 memset(&stream->shbuf, 0, sizeof(stream->shbuf));
225 stream->buffer = NULL;
226 stream->buffer_sz = 0;
227 stream->pages = NULL;
228 stream->num_pages = 0;
218} 229}
219 230
220static void stream_free(struct xen_snd_front_pcm_stream_info *stream) 231static void stream_free(struct xen_snd_front_pcm_stream_info *stream)
221{ 232{
222 xen_snd_front_shbuf_free(&stream->sh_buf); 233 xen_front_pgdir_shbuf_unmap(&stream->shbuf);
234 xen_front_pgdir_shbuf_free(&stream->shbuf);
235 if (stream->buffer)
236 free_pages_exact(stream->buffer, stream->buffer_sz);
237 kfree(stream->pages);
223 stream_clear(stream); 238 stream_clear(stream);
224} 239}
225 240
@@ -421,10 +436,34 @@ static int alsa_close(struct snd_pcm_substream *substream)
421 return 0; 436 return 0;
422} 437}
423 438
439static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream,
440 size_t buffer_sz)
441{
442 int i;
443
444 stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL);
445 if (!stream->buffer)
446 return -ENOMEM;
447
448 stream->buffer_sz = buffer_sz;
449 stream->num_pages = DIV_ROUND_UP(stream->buffer_sz, PAGE_SIZE);
450 stream->pages = kcalloc(stream->num_pages, sizeof(struct page *),
451 GFP_KERNEL);
452 if (!stream->pages)
453 return -ENOMEM;
454
455 for (i = 0; i < stream->num_pages; i++)
456 stream->pages[i] = virt_to_page(stream->buffer + i * PAGE_SIZE);
457
458 return 0;
459}
460
424static int alsa_hw_params(struct snd_pcm_substream *substream, 461static int alsa_hw_params(struct snd_pcm_substream *substream,
425 struct snd_pcm_hw_params *params) 462 struct snd_pcm_hw_params *params)
426{ 463{
427 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream); 464 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
465 struct xen_snd_front_info *front_info = stream->front_info;
466 struct xen_front_pgdir_shbuf_cfg buf_cfg;
428 int ret; 467 int ret;
429 468
430 /* 469 /*
@@ -432,19 +471,32 @@ static int alsa_hw_params(struct snd_pcm_substream *substream,
432 * so free the previously allocated shared buffer if any. 471 * so free the previously allocated shared buffer if any.
433 */ 472 */
434 stream_free(stream); 473 stream_free(stream);
474 ret = shbuf_setup_backstore(stream, params_buffer_bytes(params));
475 if (ret < 0)
476 goto fail;
435 477
436 ret = xen_snd_front_shbuf_alloc(stream->front_info->xb_dev, 478 memset(&buf_cfg, 0, sizeof(buf_cfg));
437 &stream->sh_buf, 479 buf_cfg.xb_dev = front_info->xb_dev;
438 params_buffer_bytes(params)); 480 buf_cfg.pgdir = &stream->shbuf;
439 if (ret < 0) { 481 buf_cfg.num_pages = stream->num_pages;
440 stream_free(stream); 482 buf_cfg.pages = stream->pages;
441 dev_err(&stream->front_info->xb_dev->dev, 483
442 "Failed to allocate buffers for stream with index %d\n", 484 ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
443 stream->index); 485 if (ret < 0)
444 return ret; 486 goto fail;
445 } 487
488 ret = xen_front_pgdir_shbuf_map(&stream->shbuf);
489 if (ret < 0)
490 goto fail;
446 491
447 return 0; 492 return 0;
493
494fail:
495 stream_free(stream);
496 dev_err(&front_info->xb_dev->dev,
497 "Failed to allocate buffers for stream with index %d\n",
498 stream->index);
499 return ret;
448} 500}
449 501
450static int alsa_hw_free(struct snd_pcm_substream *substream) 502static int alsa_hw_free(struct snd_pcm_substream *substream)
@@ -476,7 +528,7 @@ static int alsa_prepare(struct snd_pcm_substream *substream)
476 sndif_format = ret; 528 sndif_format = ret;
477 529
478 ret = xen_snd_front_stream_prepare(&stream->evt_pair->req, 530 ret = xen_snd_front_stream_prepare(&stream->evt_pair->req,
479 &stream->sh_buf, 531 &stream->shbuf,
480 sndif_format, 532 sndif_format,
481 runtime->channels, 533 runtime->channels,
482 runtime->rate, 534 runtime->rate,
@@ -556,10 +608,10 @@ static int alsa_pb_copy_user(struct snd_pcm_substream *substream,
556{ 608{
557 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream); 609 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
558 610
559 if (unlikely(pos + count > stream->sh_buf.buffer_sz)) 611 if (unlikely(pos + count > stream->buffer_sz))
560 return -EINVAL; 612 return -EINVAL;
561 613
562 if (copy_from_user(stream->sh_buf.buffer + pos, src, count)) 614 if (copy_from_user(stream->buffer + pos, src, count))
563 return -EFAULT; 615 return -EFAULT;
564 616
565 return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count); 617 return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
@@ -571,10 +623,10 @@ static int alsa_pb_copy_kernel(struct snd_pcm_substream *substream,
571{ 623{
572 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream); 624 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
573 625
574 if (unlikely(pos + count > stream->sh_buf.buffer_sz)) 626 if (unlikely(pos + count > stream->buffer_sz))
575 return -EINVAL; 627 return -EINVAL;
576 628
577 memcpy(stream->sh_buf.buffer + pos, src, count); 629 memcpy(stream->buffer + pos, src, count);
578 630
579 return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count); 631 return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
580} 632}
@@ -586,14 +638,14 @@ static int alsa_cap_copy_user(struct snd_pcm_substream *substream,
586 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream); 638 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
587 int ret; 639 int ret;
588 640
589 if (unlikely(pos + count > stream->sh_buf.buffer_sz)) 641 if (unlikely(pos + count > stream->buffer_sz))
590 return -EINVAL; 642 return -EINVAL;
591 643
592 ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count); 644 ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count);
593 if (ret < 0) 645 if (ret < 0)
594 return ret; 646 return ret;
595 647
596 return copy_to_user(dst, stream->sh_buf.buffer + pos, count) ? 648 return copy_to_user(dst, stream->buffer + pos, count) ?
597 -EFAULT : 0; 649 -EFAULT : 0;
598} 650}
599 651
@@ -604,14 +656,14 @@ static int alsa_cap_copy_kernel(struct snd_pcm_substream *substream,
604 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream); 656 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
605 int ret; 657 int ret;
606 658
607 if (unlikely(pos + count > stream->sh_buf.buffer_sz)) 659 if (unlikely(pos + count > stream->buffer_sz))
608 return -EINVAL; 660 return -EINVAL;
609 661
610 ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count); 662 ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count);
611 if (ret < 0) 663 if (ret < 0)
612 return ret; 664 return ret;
613 665
614 memcpy(dst, stream->sh_buf.buffer + pos, count); 666 memcpy(dst, stream->buffer + pos, count);
615 667
616 return 0; 668 return 0;
617} 669}
@@ -622,10 +674,10 @@ static int alsa_pb_fill_silence(struct snd_pcm_substream *substream,
622{ 674{
623 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream); 675 struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
624 676
625 if (unlikely(pos + count > stream->sh_buf.buffer_sz)) 677 if (unlikely(pos + count > stream->buffer_sz))
626 return -EINVAL; 678 return -EINVAL;
627 679
628 memset(stream->sh_buf.buffer + pos, 0, count); 680 memset(stream->buffer + pos, 0, count);
629 681
630 return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count); 682 return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
631} 683}
diff --git a/sound/xen/xen_snd_front_shbuf.c b/sound/xen/xen_snd_front_shbuf.c
deleted file mode 100644
index 07ac176a41ba..000000000000
--- a/sound/xen/xen_snd_front_shbuf.c
+++ /dev/null
@@ -1,194 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2
3/*
4 * Xen para-virtual sound device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#include <linux/kernel.h>
12#include <xen/xen.h>
13#include <xen/xenbus.h>
14
15#include "xen_snd_front_shbuf.h"
16
17grant_ref_t xen_snd_front_shbuf_get_dir_start(struct xen_snd_front_shbuf *buf)
18{
19 if (!buf->grefs)
20 return GRANT_INVALID_REF;
21
22 return buf->grefs[0];
23}
24
25void xen_snd_front_shbuf_clear(struct xen_snd_front_shbuf *buf)
26{
27 memset(buf, 0, sizeof(*buf));
28}
29
30void xen_snd_front_shbuf_free(struct xen_snd_front_shbuf *buf)
31{
32 int i;
33
34 if (buf->grefs) {
35 for (i = 0; i < buf->num_grefs; i++)
36 if (buf->grefs[i] != GRANT_INVALID_REF)
37 gnttab_end_foreign_access(buf->grefs[i],
38 0, 0UL);
39 kfree(buf->grefs);
40 }
41 kfree(buf->directory);
42 free_pages_exact(buf->buffer, buf->buffer_sz);
43 xen_snd_front_shbuf_clear(buf);
44}
45
46/*
47 * number of grant references a page can hold with respect to the
48 * xensnd_page_directory header
49 */
50#define XENSND_NUM_GREFS_PER_PAGE ((XEN_PAGE_SIZE - \
51 offsetof(struct xensnd_page_directory, gref)) / \
52 sizeof(grant_ref_t))
53
54static void fill_page_dir(struct xen_snd_front_shbuf *buf,
55 int num_pages_dir)
56{
57 struct xensnd_page_directory *page_dir;
58 unsigned char *ptr;
59 int i, cur_gref, grefs_left, to_copy;
60
61 ptr = buf->directory;
62 grefs_left = buf->num_grefs - num_pages_dir;
63 /*
64 * skip grant references at the beginning, they are for pages granted
65 * for the page directory itself
66 */
67 cur_gref = num_pages_dir;
68 for (i = 0; i < num_pages_dir; i++) {
69 page_dir = (struct xensnd_page_directory *)ptr;
70 if (grefs_left <= XENSND_NUM_GREFS_PER_PAGE) {
71 to_copy = grefs_left;
72 page_dir->gref_dir_next_page = GRANT_INVALID_REF;
73 } else {
74 to_copy = XENSND_NUM_GREFS_PER_PAGE;
75 page_dir->gref_dir_next_page = buf->grefs[i + 1];
76 }
77
78 memcpy(&page_dir->gref, &buf->grefs[cur_gref],
79 to_copy * sizeof(grant_ref_t));
80
81 ptr += XEN_PAGE_SIZE;
82 grefs_left -= to_copy;
83 cur_gref += to_copy;
84 }
85}
86
87static int grant_references(struct xenbus_device *xb_dev,
88 struct xen_snd_front_shbuf *buf,
89 int num_pages_dir, int num_pages_buffer,
90 int num_grefs)
91{
92 grant_ref_t priv_gref_head;
93 unsigned long frame;
94 int ret, i, j, cur_ref;
95 int otherend_id;
96
97 ret = gnttab_alloc_grant_references(num_grefs, &priv_gref_head);
98 if (ret)
99 return ret;
100
101 buf->num_grefs = num_grefs;
102 otherend_id = xb_dev->otherend_id;
103 j = 0;
104
105 for (i = 0; i < num_pages_dir; i++) {
106 cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
107 if (cur_ref < 0) {
108 ret = cur_ref;
109 goto fail;
110 }
111
112 frame = xen_page_to_gfn(virt_to_page(buf->directory +
113 XEN_PAGE_SIZE * i));
114 gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
115 buf->grefs[j++] = cur_ref;
116 }
117
118 for (i = 0; i < num_pages_buffer; i++) {
119 cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
120 if (cur_ref < 0) {
121 ret = cur_ref;
122 goto fail;
123 }
124
125 frame = xen_page_to_gfn(virt_to_page(buf->buffer +
126 XEN_PAGE_SIZE * i));
127 gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
128 buf->grefs[j++] = cur_ref;
129 }
130
131 gnttab_free_grant_references(priv_gref_head);
132 fill_page_dir(buf, num_pages_dir);
133 return 0;
134
135fail:
136 gnttab_free_grant_references(priv_gref_head);
137 return ret;
138}
139
140static int alloc_int_buffers(struct xen_snd_front_shbuf *buf,
141 int num_pages_dir, int num_pages_buffer,
142 int num_grefs)
143{
144 buf->grefs = kcalloc(num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
145 if (!buf->grefs)
146 return -ENOMEM;
147
148 buf->directory = kcalloc(num_pages_dir, XEN_PAGE_SIZE, GFP_KERNEL);
149 if (!buf->directory)
150 goto fail;
151
152 buf->buffer_sz = num_pages_buffer * XEN_PAGE_SIZE;
153 buf->buffer = alloc_pages_exact(buf->buffer_sz, GFP_KERNEL);
154 if (!buf->buffer)
155 goto fail;
156
157 return 0;
158
159fail:
160 kfree(buf->grefs);
161 buf->grefs = NULL;
162 kfree(buf->directory);
163 buf->directory = NULL;
164 return -ENOMEM;
165}
166
167int xen_snd_front_shbuf_alloc(struct xenbus_device *xb_dev,
168 struct xen_snd_front_shbuf *buf,
169 unsigned int buffer_sz)
170{
171 int num_pages_buffer, num_pages_dir, num_grefs;
172 int ret;
173
174 xen_snd_front_shbuf_clear(buf);
175
176 num_pages_buffer = DIV_ROUND_UP(buffer_sz, XEN_PAGE_SIZE);
177 /* number of pages the page directory consumes itself */
178 num_pages_dir = DIV_ROUND_UP(num_pages_buffer,
179 XENSND_NUM_GREFS_PER_PAGE);
180 num_grefs = num_pages_buffer + num_pages_dir;
181
182 ret = alloc_int_buffers(buf, num_pages_dir,
183 num_pages_buffer, num_grefs);
184 if (ret < 0)
185 return ret;
186
187 ret = grant_references(xb_dev, buf, num_pages_dir, num_pages_buffer,
188 num_grefs);
189 if (ret < 0)
190 return ret;
191
192 fill_page_dir(buf, num_pages_dir);
193 return 0;
194}
diff --git a/sound/xen/xen_snd_front_shbuf.h b/sound/xen/xen_snd_front_shbuf.h
deleted file mode 100644
index d28e97c47b2c..000000000000
--- a/sound/xen/xen_snd_front_shbuf.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2
3/*
4 * Xen para-virtual sound device
5 *
6 * Copyright (C) 2016-2018 EPAM Systems Inc.
7 *
8 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9 */
10
11#ifndef __XEN_SND_FRONT_SHBUF_H
12#define __XEN_SND_FRONT_SHBUF_H
13
14#include <xen/grant_table.h>
15
16#include "xen_snd_front_evtchnl.h"
17
18struct xen_snd_front_shbuf {
19 int num_grefs;
20 grant_ref_t *grefs;
21 u8 *directory;
22 u8 *buffer;
23 size_t buffer_sz;
24};
25
26grant_ref_t xen_snd_front_shbuf_get_dir_start(struct xen_snd_front_shbuf *buf);
27
28int xen_snd_front_shbuf_alloc(struct xenbus_device *xb_dev,
29 struct xen_snd_front_shbuf *buf,
30 unsigned int buffer_sz);
31
32void xen_snd_front_shbuf_clear(struct xen_snd_front_shbuf *buf);
33
34void xen_snd_front_shbuf_free(struct xen_snd_front_shbuf *buf);
35
36#endif /* __XEN_SND_FRONT_SHBUF_H */