aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-09-07 13:24:21 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-09-07 13:24:21 -0400
commit3ee31b89d9b12c01aa03dda7a923ef07a800eedd (patch)
tree3eff45b4147cb74a72c382414a15c5f91e2ec228
parentbac65d9d87b383471d8d29128319508d71b74180 (diff)
parentd785d9ec7894d0e92f0d0eecc8add9c84131daa4 (diff)
Merge tag 'for-linus-4.14b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from Juergen Gross: - the new pvcalls backend for routing socket calls from a guest to dom0 - some cleanups of Xen code - a fix for wrong usage of {get,put}_cpu() * tag 'for-linus-4.14b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (27 commits) xen/mmu: set MMU_NORMAL_PT_UPDATE in remap_area_mfn_pte_fn xen: Don't try to call xen_alloc_p2m_entry() on autotranslating guests xen/events: events_fifo: Don't use {get,put}_cpu() in xen_evtchn_fifo_init() xen/pvcalls: use WARN_ON(1) instead of __WARN() xen: remove not used trace functions xen: remove unused function xen_set_domain_pte() xen: remove tests for pvh mode in pure pv paths xen-platform: constify pci_device_id. xen: cleanup xen.h xen: introduce a Kconfig option to enable the pvcalls backend xen/pvcalls: implement write xen/pvcalls: implement read xen/pvcalls: implement the ioworker functions xen/pvcalls: disconnect and module_exit xen/pvcalls: implement release command xen/pvcalls: implement poll command xen/pvcalls: implement accept command xen/pvcalls: implement listen command xen/pvcalls: implement bind command xen/pvcalls: implement connect command ...
-rw-r--r--arch/x86/include/asm/xen/page.h5
-rw-r--r--arch/x86/xen/mmu.c2
-rw-r--r--arch/x86/xen/mmu_pv.c20
-rw-r--r--arch/x86/xen/p2m.c25
-rw-r--r--arch/x86/xen/setup.c5
-rw-r--r--drivers/xen/Kconfig12
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/balloon.c8
-rw-r--r--drivers/xen/events/events_fifo.c7
-rw-r--r--drivers/xen/platform-pci.c2
-rw-r--r--drivers/xen/pvcalls-back.c1240
-rw-r--r--include/trace/events/xen.h38
-rw-r--r--include/xen/interface/io/pvcalls.h121
-rw-r--r--include/xen/interface/io/ring.h2
-rw-r--r--include/xen/xen.h20
15 files changed, 1397 insertions, 111 deletions
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 8417ef7c3885..07b6531813c4 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -158,9 +158,6 @@ static inline unsigned long mfn_to_pfn_no_overrides(unsigned long mfn)
158 unsigned long pfn; 158 unsigned long pfn;
159 int ret; 159 int ret;
160 160
161 if (xen_feature(XENFEAT_auto_translated_physmap))
162 return mfn;
163
164 if (unlikely(mfn >= machine_to_phys_nr)) 161 if (unlikely(mfn >= machine_to_phys_nr))
165 return ~0; 162 return ~0;
166 163
@@ -317,8 +314,6 @@ static inline pte_t __pte_ma(pteval_t x)
317#define p4d_val_ma(x) ((x).p4d) 314#define p4d_val_ma(x) ((x).p4d)
318#endif 315#endif
319 316
320void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid);
321
322xmaddr_t arbitrary_virt_to_machine(void *address); 317xmaddr_t arbitrary_virt_to_machine(void *address);
323unsigned long arbitrary_virt_to_mfn(void *vaddr); 318unsigned long arbitrary_virt_to_mfn(void *vaddr);
324void make_lowmem_page_readonly(void *vaddr); 319void make_lowmem_page_readonly(void *vaddr);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3be06f3caf3c..3e15345abfe7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -84,7 +84,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
84 else 84 else
85 rmd->mfn++; 85 rmd->mfn++;
86 86
87 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; 87 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
88 rmd->mmu_update->val = pte_val_ma(pte); 88 rmd->mmu_update->val = pte_val_ma(pte);
89 rmd->mmu_update++; 89 rmd->mmu_update++;
90 90
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index e437714750f8..6b983b300666 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -162,26 +162,6 @@ static bool xen_page_pinned(void *ptr)
162 return PagePinned(page); 162 return PagePinned(page);
163} 163}
164 164
165void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
166{
167 struct multicall_space mcs;
168 struct mmu_update *u;
169
170 trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
171
172 mcs = xen_mc_entry(sizeof(*u));
173 u = mcs.args;
174
175 /* ptep might be kmapped when using 32-bit HIGHPTE */
176 u->ptr = virt_to_machine(ptep).maddr;
177 u->val = pte_val_ma(pteval);
178
179 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
180
181 xen_mc_issue(PARAVIRT_LAZY_MMU);
182}
183EXPORT_SYMBOL_GPL(xen_set_domain_pte);
184
185static void xen_extend_mmu_update(const struct mmu_update *update) 165static void xen_extend_mmu_update(const struct mmu_update *update)
186{ 166{
187 struct multicall_space mcs; 167 struct multicall_space mcs;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 276da636dd39..6083ba462f35 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -212,8 +212,7 @@ void __ref xen_build_mfn_list_list(void)
212 unsigned int level, topidx, mididx; 212 unsigned int level, topidx, mididx;
213 unsigned long *mid_mfn_p; 213 unsigned long *mid_mfn_p;
214 214
215 if (xen_feature(XENFEAT_auto_translated_physmap) || 215 if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
216 xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
217 return; 216 return;
218 217
219 /* Pre-initialize p2m_top_mfn to be completely missing */ 218 /* Pre-initialize p2m_top_mfn to be completely missing */
@@ -269,9 +268,6 @@ void __ref xen_build_mfn_list_list(void)
269 268
270void xen_setup_mfn_list_list(void) 269void xen_setup_mfn_list_list(void)
271{ 270{
272 if (xen_feature(XENFEAT_auto_translated_physmap))
273 return;
274
275 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); 271 BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
276 272
277 if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS) 273 if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
@@ -291,9 +287,6 @@ void __init xen_build_dynamic_phys_to_machine(void)
291{ 287{
292 unsigned long pfn; 288 unsigned long pfn;
293 289
294 if (xen_feature(XENFEAT_auto_translated_physmap))
295 return;
296
297 xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list; 290 xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
298 xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE); 291 xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);
299 292
@@ -540,9 +533,6 @@ int xen_alloc_p2m_entry(unsigned long pfn)
540 unsigned long addr = (unsigned long)(xen_p2m_addr + pfn); 533 unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
541 unsigned long p2m_pfn; 534 unsigned long p2m_pfn;
542 535
543 if (xen_feature(XENFEAT_auto_translated_physmap))
544 return 0;
545
546 ptep = lookup_address(addr, &level); 536 ptep = lookup_address(addr, &level);
547 BUG_ON(!ptep || level != PG_LEVEL_4K); 537 BUG_ON(!ptep || level != PG_LEVEL_4K);
548 pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1)); 538 pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
@@ -640,9 +630,6 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
640 if (unlikely(pfn_s >= xen_p2m_size)) 630 if (unlikely(pfn_s >= xen_p2m_size))
641 return 0; 631 return 0;
642 632
643 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
644 return pfn_e - pfn_s;
645
646 if (pfn_s > pfn_e) 633 if (pfn_s > pfn_e)
647 return 0; 634 return 0;
648 635
@@ -660,10 +647,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
660 pte_t *ptep; 647 pte_t *ptep;
661 unsigned int level; 648 unsigned int level;
662 649
663 /* don't track P2M changes in autotranslate guests */
664 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
665 return true;
666
667 if (unlikely(pfn >= xen_p2m_size)) { 650 if (unlikely(pfn >= xen_p2m_size)) {
668 BUG_ON(mfn != INVALID_P2M_ENTRY); 651 BUG_ON(mfn != INVALID_P2M_ENTRY);
669 return true; 652 return true;
@@ -711,9 +694,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
711 int i, ret = 0; 694 int i, ret = 0;
712 pte_t *pte; 695 pte_t *pte;
713 696
714 if (xen_feature(XENFEAT_auto_translated_physmap))
715 return 0;
716
717 if (kmap_ops) { 697 if (kmap_ops) {
718 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, 698 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
719 kmap_ops, count); 699 kmap_ops, count);
@@ -756,9 +736,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
756{ 736{
757 int i, ret = 0; 737 int i, ret = 0;
758 738
759 if (xen_feature(XENFEAT_auto_translated_physmap))
760 return 0;
761
762 for (i = 0; i < count; i++) { 739 for (i = 0; i < count; i++) {
763 unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); 740 unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
764 unsigned long pfn = page_to_pfn(pages[i]); 741 unsigned long pfn = page_to_pfn(pages[i]);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index c81046323ebc..ac55c02f98e9 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -340,8 +340,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
340 340
341 WARN_ON(size == 0); 341 WARN_ON(size == 0);
342 342
343 BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
344
345 mfn_save = virt_to_mfn(buf); 343 mfn_save = virt_to_mfn(buf);
346 344
347 for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn; 345 for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
@@ -1024,8 +1022,7 @@ void __init xen_pvmmu_arch_setup(void)
1024void __init xen_arch_setup(void) 1022void __init xen_arch_setup(void)
1025{ 1023{
1026 xen_panic_handler_init(); 1024 xen_panic_handler_init();
1027 if (!xen_feature(XENFEAT_auto_translated_physmap)) 1025 xen_pvmmu_arch_setup();
1028 xen_pvmmu_arch_setup();
1029 1026
1030#ifdef CONFIG_ACPI 1027#ifdef CONFIG_ACPI
1031 if (!(xen_start_info->flags & SIF_INITDOMAIN)) { 1028 if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index f15bb3b789d5..4545561954ee 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -196,6 +196,18 @@ config XEN_PCIDEV_BACKEND
196 196
197 If in doubt, say m. 197 If in doubt, say m.
198 198
199config XEN_PVCALLS_BACKEND
200 bool "XEN PV Calls backend driver"
201 depends on INET && XEN && XEN_BACKEND
202 default n
203 help
204 Experimental backend for the Xen PV Calls protocol
205 (https://xenbits.xen.org/docs/unstable/misc/pvcalls.html). It
206 allows PV Calls frontends to send POSIX calls to the backend,
207 which implements them.
208
209 If in doubt, say n.
210
199config XEN_SCSI_BACKEND 211config XEN_SCSI_BACKEND
200 tristate "XEN SCSI backend driver" 212 tristate "XEN SCSI backend driver"
201 depends on XEN && XEN_BACKEND && TARGET_CORE 213 depends on XEN && XEN_BACKEND && TARGET_CORE
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 7f188b8d0c67..caaa15dc37bc 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
35obj-$(CONFIG_XEN_EFI) += efi.o 35obj-$(CONFIG_XEN_EFI) += efi.o
36obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o 36obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o
37obj-$(CONFIG_XEN_AUTO_XLATE) += xlate_mmu.o 37obj-$(CONFIG_XEN_AUTO_XLATE) += xlate_mmu.o
38obj-$(CONFIG_XEN_PVCALLS_BACKEND) += pvcalls-back.o
38xen-evtchn-y := evtchn.o 39xen-evtchn-y := evtchn.o
39xen-gntdev-y := gntdev.o 40xen-gntdev-y := gntdev.o
40xen-gntalloc-y := gntalloc.o 41xen-gntalloc-y := gntalloc.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index ab609255a0f3..f77e499afddd 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -664,9 +664,11 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
664 */ 664 */
665 BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); 665 BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
666 666
667 ret = xen_alloc_p2m_entry(page_to_pfn(page)); 667 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
668 if (ret < 0) 668 ret = xen_alloc_p2m_entry(page_to_pfn(page));
669 goto out_undo; 669 if (ret < 0)
670 goto out_undo;
671 }
670#endif 672#endif
671 } else { 673 } else {
672 ret = add_ballooned_pages(nr_pages - pgno); 674 ret = add_ballooned_pages(nr_pages - pgno);
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 3c41470c7fc4..76b318e88382 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -432,12 +432,12 @@ static int xen_evtchn_cpu_dead(unsigned int cpu)
432 432
433int __init xen_evtchn_fifo_init(void) 433int __init xen_evtchn_fifo_init(void)
434{ 434{
435 int cpu = get_cpu(); 435 int cpu = smp_processor_id();
436 int ret; 436 int ret;
437 437
438 ret = evtchn_fifo_alloc_control_block(cpu); 438 ret = evtchn_fifo_alloc_control_block(cpu);
439 if (ret < 0) 439 if (ret < 0)
440 goto out; 440 return ret;
441 441
442 pr_info("Using FIFO-based ABI\n"); 442 pr_info("Using FIFO-based ABI\n");
443 443
@@ -446,7 +446,6 @@ int __init xen_evtchn_fifo_init(void)
446 cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE, 446 cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
447 "xen/evtchn:prepare", 447 "xen/evtchn:prepare",
448 xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead); 448 xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
449out: 449
450 put_cpu();
451 return ret; 450 return ret;
452} 451}
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 1275df83070f..5d7dcad0b0a0 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -175,7 +175,7 @@ pci_out:
175 return ret; 175 return ret;
176} 176}
177 177
178static struct pci_device_id platform_pci_tbl[] = { 178static const struct pci_device_id platform_pci_tbl[] = {
179 {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM, 179 {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM,
180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, 180 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
181 {0,} 181 {0,}
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
new file mode 100644
index 000000000000..b209cd44bb8d
--- /dev/null
+++ b/drivers/xen/pvcalls-back.c
@@ -0,0 +1,1240 @@
1/*
2 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/inet.h>
16#include <linux/kthread.h>
17#include <linux/list.h>
18#include <linux/radix-tree.h>
19#include <linux/module.h>
20#include <linux/semaphore.h>
21#include <linux/wait.h>
22#include <net/sock.h>
23#include <net/inet_common.h>
24#include <net/inet_connection_sock.h>
25#include <net/request_sock.h>
26
27#include <xen/events.h>
28#include <xen/grant_table.h>
29#include <xen/xen.h>
30#include <xen/xenbus.h>
31#include <xen/interface/io/pvcalls.h>
32
33#define PVCALLS_VERSIONS "1"
34#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER
35
36struct pvcalls_back_global {
37 struct list_head frontends;
38 struct semaphore frontends_lock;
39} pvcalls_back_global;
40
41/*
42 * Per-frontend data structure. It contains pointers to the command
43 * ring, its event channel, a list of active sockets and a tree of
44 * passive sockets.
45 */
46struct pvcalls_fedata {
47 struct list_head list;
48 struct xenbus_device *dev;
49 struct xen_pvcalls_sring *sring;
50 struct xen_pvcalls_back_ring ring;
51 int irq;
52 struct list_head socket_mappings;
53 struct radix_tree_root socketpass_mappings;
54 struct semaphore socket_lock;
55};
56
57struct pvcalls_ioworker {
58 struct work_struct register_work;
59 struct workqueue_struct *wq;
60};
61
62struct sock_mapping {
63 struct list_head list;
64 struct pvcalls_fedata *fedata;
65 struct sockpass_mapping *sockpass;
66 struct socket *sock;
67 uint64_t id;
68 grant_ref_t ref;
69 struct pvcalls_data_intf *ring;
70 void *bytes;
71 struct pvcalls_data data;
72 uint32_t ring_order;
73 int irq;
74 atomic_t read;
75 atomic_t write;
76 atomic_t io;
77 atomic_t release;
78 void (*saved_data_ready)(struct sock *sk);
79 struct pvcalls_ioworker ioworker;
80};
81
82struct sockpass_mapping {
83 struct list_head list;
84 struct pvcalls_fedata *fedata;
85 struct socket *sock;
86 uint64_t id;
87 struct xen_pvcalls_request reqcopy;
88 spinlock_t copy_lock;
89 struct workqueue_struct *wq;
90 struct work_struct register_work;
91 void (*saved_data_ready)(struct sock *sk);
92};
93
94static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
95static int pvcalls_back_release_active(struct xenbus_device *dev,
96 struct pvcalls_fedata *fedata,
97 struct sock_mapping *map);
98
99static void pvcalls_conn_back_read(void *opaque)
100{
101 struct sock_mapping *map = (struct sock_mapping *)opaque;
102 struct msghdr msg;
103 struct kvec vec[2];
104 RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
105 int32_t error;
106 struct pvcalls_data_intf *intf = map->ring;
107 struct pvcalls_data *data = &map->data;
108 unsigned long flags;
109 int ret;
110
111 array_size = XEN_FLEX_RING_SIZE(map->ring_order);
112 cons = intf->in_cons;
113 prod = intf->in_prod;
114 error = intf->in_error;
115 /* read the indexes first, then deal with the data */
116 virt_mb();
117
118 if (error)
119 return;
120
121 size = pvcalls_queued(prod, cons, array_size);
122 if (size >= array_size)
123 return;
124 spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
125 if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
126 atomic_set(&map->read, 0);
127 spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
128 flags);
129 return;
130 }
131 spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
132 wanted = array_size - size;
133 masked_prod = pvcalls_mask(prod, array_size);
134 masked_cons = pvcalls_mask(cons, array_size);
135
136 memset(&msg, 0, sizeof(msg));
137 msg.msg_iter.type = ITER_KVEC|WRITE;
138 msg.msg_iter.count = wanted;
139 if (masked_prod < masked_cons) {
140 vec[0].iov_base = data->in + masked_prod;
141 vec[0].iov_len = wanted;
142 msg.msg_iter.kvec = vec;
143 msg.msg_iter.nr_segs = 1;
144 } else {
145 vec[0].iov_base = data->in + masked_prod;
146 vec[0].iov_len = array_size - masked_prod;
147 vec[1].iov_base = data->in;
148 vec[1].iov_len = wanted - vec[0].iov_len;
149 msg.msg_iter.kvec = vec;
150 msg.msg_iter.nr_segs = 2;
151 }
152
153 atomic_set(&map->read, 0);
154 ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
155 WARN_ON(ret > wanted);
156 if (ret == -EAGAIN) /* shouldn't happen */
157 return;
158 if (!ret)
159 ret = -ENOTCONN;
160 spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
161 if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
162 atomic_inc(&map->read);
163 spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
164
165 /* write the data, then modify the indexes */
166 virt_wmb();
167 if (ret < 0)
168 intf->in_error = ret;
169 else
170 intf->in_prod = prod + ret;
171 /* update the indexes, then notify the other end */
172 virt_wmb();
173 notify_remote_via_irq(map->irq);
174
175 return;
176}
177
178static void pvcalls_conn_back_write(struct sock_mapping *map)
179{
180 struct pvcalls_data_intf *intf = map->ring;
181 struct pvcalls_data *data = &map->data;
182 struct msghdr msg;
183 struct kvec vec[2];
184 RING_IDX cons, prod, size, array_size;
185 int ret;
186
187 cons = intf->out_cons;
188 prod = intf->out_prod;
189 /* read the indexes before dealing with the data */
190 virt_mb();
191
192 array_size = XEN_FLEX_RING_SIZE(map->ring_order);
193 size = pvcalls_queued(prod, cons, array_size);
194 if (size == 0)
195 return;
196
197 memset(&msg, 0, sizeof(msg));
198 msg.msg_flags |= MSG_DONTWAIT;
199 msg.msg_iter.type = ITER_KVEC|READ;
200 msg.msg_iter.count = size;
201 if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
202 vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
203 vec[0].iov_len = size;
204 msg.msg_iter.kvec = vec;
205 msg.msg_iter.nr_segs = 1;
206 } else {
207 vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
208 vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
209 vec[1].iov_base = data->out;
210 vec[1].iov_len = size - vec[0].iov_len;
211 msg.msg_iter.kvec = vec;
212 msg.msg_iter.nr_segs = 2;
213 }
214
215 atomic_set(&map->write, 0);
216 ret = inet_sendmsg(map->sock, &msg, size);
217 if (ret == -EAGAIN || (ret >= 0 && ret < size)) {
218 atomic_inc(&map->write);
219 atomic_inc(&map->io);
220 }
221 if (ret == -EAGAIN)
222 return;
223
224 /* write the data, then update the indexes */
225 virt_wmb();
226 if (ret < 0) {
227 intf->out_error = ret;
228 } else {
229 intf->out_error = 0;
230 intf->out_cons = cons + ret;
231 prod = intf->out_prod;
232 }
233 /* update the indexes, then notify the other end */
234 virt_wmb();
235 if (prod != cons + ret)
236 atomic_inc(&map->write);
237 notify_remote_via_irq(map->irq);
238}
239
240static void pvcalls_back_ioworker(struct work_struct *work)
241{
242 struct pvcalls_ioworker *ioworker = container_of(work,
243 struct pvcalls_ioworker, register_work);
244 struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
245 ioworker);
246
247 while (atomic_read(&map->io) > 0) {
248 if (atomic_read(&map->release) > 0) {
249 atomic_set(&map->release, 0);
250 return;
251 }
252
253 if (atomic_read(&map->read) > 0)
254 pvcalls_conn_back_read(map);
255 if (atomic_read(&map->write) > 0)
256 pvcalls_conn_back_write(map);
257
258 atomic_dec(&map->io);
259 }
260}
261
262static int pvcalls_back_socket(struct xenbus_device *dev,
263 struct xen_pvcalls_request *req)
264{
265 struct pvcalls_fedata *fedata;
266 int ret;
267 struct xen_pvcalls_response *rsp;
268
269 fedata = dev_get_drvdata(&dev->dev);
270
271 if (req->u.socket.domain != AF_INET ||
272 req->u.socket.type != SOCK_STREAM ||
273 (req->u.socket.protocol != IPPROTO_IP &&
274 req->u.socket.protocol != AF_INET))
275 ret = -EAFNOSUPPORT;
276 else
277 ret = 0;
278
279 /* leave the actual socket allocation for later */
280
281 rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
282 rsp->req_id = req->req_id;
283 rsp->cmd = req->cmd;
284 rsp->u.socket.id = req->u.socket.id;
285 rsp->ret = ret;
286
287 return 0;
288}
289
290static void pvcalls_sk_state_change(struct sock *sock)
291{
292 struct sock_mapping *map = sock->sk_user_data;
293 struct pvcalls_data_intf *intf;
294
295 if (map == NULL)
296 return;
297
298 intf = map->ring;
299 intf->in_error = -ENOTCONN;
300 notify_remote_via_irq(map->irq);
301}
302
303static void pvcalls_sk_data_ready(struct sock *sock)
304{
305 struct sock_mapping *map = sock->sk_user_data;
306 struct pvcalls_ioworker *iow;
307
308 if (map == NULL)
309 return;
310
311 iow = &map->ioworker;
312 atomic_inc(&map->read);
313 atomic_inc(&map->io);
314 queue_work(iow->wq, &iow->register_work);
315}
316
317static struct sock_mapping *pvcalls_new_active_socket(
318 struct pvcalls_fedata *fedata,
319 uint64_t id,
320 grant_ref_t ref,
321 uint32_t evtchn,
322 struct socket *sock)
323{
324 int ret;
325 struct sock_mapping *map;
326 void *page;
327
328 map = kzalloc(sizeof(*map), GFP_KERNEL);
329 if (map == NULL)
330 return NULL;
331
332 map->fedata = fedata;
333 map->sock = sock;
334 map->id = id;
335 map->ref = ref;
336
337 ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
338 if (ret < 0)
339 goto out;
340 map->ring = page;
341 map->ring_order = map->ring->ring_order;
342 /* first read the order, then map the data ring */
343 virt_rmb();
344 if (map->ring_order > MAX_RING_ORDER) {
345 pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
346 __func__, map->ring_order, MAX_RING_ORDER);
347 goto out;
348 }
349 ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
350 (1 << map->ring_order), &page);
351 if (ret < 0)
352 goto out;
353 map->bytes = page;
354
355 ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id,
356 evtchn,
357 pvcalls_back_conn_event,
358 0,
359 "pvcalls-backend",
360 map);
361 if (ret < 0)
362 goto out;
363 map->irq = ret;
364
365 map->data.in = map->bytes;
366 map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);
367
368 map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
369 if (!map->ioworker.wq)
370 goto out;
371 atomic_set(&map->io, 1);
372 INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);
373
374 down(&fedata->socket_lock);
375 list_add_tail(&map->list, &fedata->socket_mappings);
376 up(&fedata->socket_lock);
377
378 write_lock_bh(&map->sock->sk->sk_callback_lock);
379 map->saved_data_ready = map->sock->sk->sk_data_ready;
380 map->sock->sk->sk_user_data = map;
381 map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
382 map->sock->sk->sk_state_change = pvcalls_sk_state_change;
383 write_unlock_bh(&map->sock->sk->sk_callback_lock);
384
385 return map;
386out:
387 down(&fedata->socket_lock);
388 list_del(&map->list);
389 pvcalls_back_release_active(fedata->dev, fedata, map);
390 up(&fedata->socket_lock);
391 return NULL;
392}
393
394static int pvcalls_back_connect(struct xenbus_device *dev,
395 struct xen_pvcalls_request *req)
396{
397 struct pvcalls_fedata *fedata;
398 int ret = -EINVAL;
399 struct socket *sock;
400 struct sock_mapping *map;
401 struct xen_pvcalls_response *rsp;
402 struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;
403
404 fedata = dev_get_drvdata(&dev->dev);
405
406 if (req->u.connect.len < sizeof(sa->sa_family) ||
407 req->u.connect.len > sizeof(req->u.connect.addr) ||
408 sa->sa_family != AF_INET)
409 goto out;
410
411 ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
412 if (ret < 0)
413 goto out;
414 ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
415 if (ret < 0) {
416 sock_release(sock);
417 goto out;
418 }
419
420 map = pvcalls_new_active_socket(fedata,
421 req->u.connect.id,
422 req->u.connect.ref,
423 req->u.connect.evtchn,
424 sock);
425 if (!map) {
426 ret = -EFAULT;
427 sock_release(map->sock);
428 }
429
430out:
431 rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
432 rsp->req_id = req->req_id;
433 rsp->cmd = req->cmd;
434 rsp->u.connect.id = req->u.connect.id;
435 rsp->ret = ret;
436
437 return 0;
438}
439
440static int pvcalls_back_release_active(struct xenbus_device *dev,
441 struct pvcalls_fedata *fedata,
442 struct sock_mapping *map)
443{
444 disable_irq(map->irq);
445 if (map->sock->sk != NULL) {
446 write_lock_bh(&map->sock->sk->sk_callback_lock);
447 map->sock->sk->sk_user_data = NULL;
448 map->sock->sk->sk_data_ready = map->saved_data_ready;
449 write_unlock_bh(&map->sock->sk->sk_callback_lock);
450 }
451
452 atomic_set(&map->release, 1);
453 flush_work(&map->ioworker.register_work);
454
455 xenbus_unmap_ring_vfree(dev, map->bytes);
456 xenbus_unmap_ring_vfree(dev, (void *)map->ring);
457 unbind_from_irqhandler(map->irq, map);
458
459 sock_release(map->sock);
460 kfree(map);
461
462 return 0;
463}
464
465static int pvcalls_back_release_passive(struct xenbus_device *dev,
466 struct pvcalls_fedata *fedata,
467 struct sockpass_mapping *mappass)
468{
469 if (mappass->sock->sk != NULL) {
470 write_lock_bh(&mappass->sock->sk->sk_callback_lock);
471 mappass->sock->sk->sk_user_data = NULL;
472 mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
473 write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
474 }
475 sock_release(mappass->sock);
476 flush_workqueue(mappass->wq);
477 destroy_workqueue(mappass->wq);
478 kfree(mappass);
479
480 return 0;
481}
482
483static int pvcalls_back_release(struct xenbus_device *dev,
484 struct xen_pvcalls_request *req)
485{
486 struct pvcalls_fedata *fedata;
487 struct sock_mapping *map, *n;
488 struct sockpass_mapping *mappass;
489 int ret = 0;
490 struct xen_pvcalls_response *rsp;
491
492 fedata = dev_get_drvdata(&dev->dev);
493
494 down(&fedata->socket_lock);
495 list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
496 if (map->id == req->u.release.id) {
497 list_del(&map->list);
498 up(&fedata->socket_lock);
499 ret = pvcalls_back_release_active(dev, fedata, map);
500 goto out;
501 }
502 }
503 mappass = radix_tree_lookup(&fedata->socketpass_mappings,
504 req->u.release.id);
505 if (mappass != NULL) {
506 radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
507 up(&fedata->socket_lock);
508 ret = pvcalls_back_release_passive(dev, fedata, mappass);
509 } else
510 up(&fedata->socket_lock);
511
512out:
513 rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
514 rsp->req_id = req->req_id;
515 rsp->u.release.id = req->u.release.id;
516 rsp->cmd = req->cmd;
517 rsp->ret = ret;
518 return 0;
519}
520
521static void __pvcalls_back_accept(struct work_struct *work)
522{
523 struct sockpass_mapping *mappass = container_of(
524 work, struct sockpass_mapping, register_work);
525 struct sock_mapping *map;
526 struct pvcalls_ioworker *iow;
527 struct pvcalls_fedata *fedata;
528 struct socket *sock;
529 struct xen_pvcalls_response *rsp;
530 struct xen_pvcalls_request *req;
531 int notify;
532 int ret = -EINVAL;
533 unsigned long flags;
534
535 fedata = mappass->fedata;
536 /*
537 * __pvcalls_back_accept can race against pvcalls_back_accept.
538 * We only need to check the value of "cmd" on read. It could be
539 * done atomically, but to simplify the code on the write side, we
540 * use a spinlock.
541 */
542 spin_lock_irqsave(&mappass->copy_lock, flags);
543 req = &mappass->reqcopy;
544 if (req->cmd != PVCALLS_ACCEPT) {
545 spin_unlock_irqrestore(&mappass->copy_lock, flags);
546 return;
547 }
548 spin_unlock_irqrestore(&mappass->copy_lock, flags);
549
550 sock = sock_alloc();
551 if (sock == NULL)
552 goto out_error;
553 sock->type = mappass->sock->type;
554 sock->ops = mappass->sock->ops;
555
556 ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
557 if (ret == -EAGAIN) {
558 sock_release(sock);
559 goto out_error;
560 }
561
562 map = pvcalls_new_active_socket(fedata,
563 req->u.accept.id_new,
564 req->u.accept.ref,
565 req->u.accept.evtchn,
566 sock);
567 if (!map) {
568 ret = -EFAULT;
569 sock_release(sock);
570 goto out_error;
571 }
572
573 map->sockpass = mappass;
574 iow = &map->ioworker;
575 atomic_inc(&map->read);
576 atomic_inc(&map->io);
577 queue_work(iow->wq, &iow->register_work);
578
579out_error:
580 rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
581 rsp->req_id = req->req_id;
582 rsp->cmd = req->cmd;
583 rsp->u.accept.id = req->u.accept.id;
584 rsp->ret = ret;
585 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
586 if (notify)
587 notify_remote_via_irq(fedata->irq);
588
589 mappass->reqcopy.cmd = 0;
590}
591
/*
 * sk_data_ready callback installed on a passive (listening) socket by
 * pvcalls_back_bind(). Runs in softirq context when the socket becomes
 * readable, i.e. when a new incoming connection has been queued.
 *
 * If a PVCALLS_POLL request is parked in mappass->reqcopy, answer it
 * directly on the ring; otherwise kick the per-socket workqueue so
 * __pvcalls_back_accept() can pick up the pending connection.
 */
static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	/* Callback may fire before/after sk_user_data is wired up. */
	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		/* An outstanding poll: complete it now that data arrived. */
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		/* Clear the slot so a new accept/poll may be issued. */
		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		/* No poll pending: let the accept worker consume it. */
		queue_work(mappass->wq, &mappass->register_work);
	}
}
624
/*
 * Handle a PVCALLS_BIND request: create a kernel TCP socket, bind it to
 * the address supplied by the frontend and register the resulting
 * passive-socket mapping in fedata->socketpass_mappings.
 *
 * On any failure all partially acquired resources (socket, workqueue,
 * mapping) are released and the error is reported in rsp->ret.
 * Always returns 0: a response is queued on the ring in every case.
 */
static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	/* One-thread unbound wq: accepts for this socket run serialized. */
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	/*
	 * Install our data_ready hook last, once the mapping is fully set
	 * up, saving the original callback for restoration on release.
	 */
	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		/* Tear down whatever was acquired before the failure. */
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}
689
690static int pvcalls_back_listen(struct xenbus_device *dev,
691 struct xen_pvcalls_request *req)
692{
693 struct pvcalls_fedata *fedata;
694 int ret = -EINVAL;
695 struct sockpass_mapping *map;
696 struct xen_pvcalls_response *rsp;
697
698 fedata = dev_get_drvdata(&dev->dev);
699
700 down(&fedata->socket_lock);
701 map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
702 up(&fedata->socket_lock);
703 if (map == NULL)
704 goto out;
705
706 ret = inet_listen(map->sock, req->u.listen.backlog);
707
708out:
709 rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
710 rsp->req_id = req->req_id;
711 rsp->cmd = req->cmd;
712 rsp->u.listen.id = req->u.listen.id;
713 rsp->ret = ret;
714 return 0;
715}
716
/*
 * Handle a PVCALLS_ACCEPT request. The actual accept is performed
 * asynchronously by __pvcalls_back_accept() on the per-socket
 * workqueue; this function only stashes a copy of the request in
 * mappass->reqcopy and kicks the worker.
 *
 * Returns -1 when the response will be sent later by the worker,
 * 0 when an error response has been queued here.
 */
static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
		req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		/* reqcopy slot busy: another accept/poll is in flight. */
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	/* Park the request; the worker answers it when a socket arrives. */
	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}
761
762static int pvcalls_back_poll(struct xenbus_device *dev,
763 struct xen_pvcalls_request *req)
764{
765 struct pvcalls_fedata *fedata;
766 struct sockpass_mapping *mappass;
767 struct xen_pvcalls_response *rsp;
768 struct inet_connection_sock *icsk;
769 struct request_sock_queue *queue;
770 unsigned long flags;
771 int ret;
772 bool data;
773
774 fedata = dev_get_drvdata(&dev->dev);
775
776 down(&fedata->socket_lock);
777 mappass = radix_tree_lookup(&fedata->socketpass_mappings,
778 req->u.poll.id);
779 up(&fedata->socket_lock);
780 if (mappass == NULL)
781 return -EINVAL;
782
783 /*
784 * Limitation of the current implementation: only support one
785 * concurrent accept or poll call on one socket.
786 */
787 spin_lock_irqsave(&mappass->copy_lock, flags);
788 if (mappass->reqcopy.cmd != 0) {
789 ret = -EINTR;
790 goto out;
791 }
792
793 mappass->reqcopy = *req;
794 icsk = inet_csk(mappass->sock->sk);
795 queue = &icsk->icsk_accept_queue;
796 data = queue->rskq_accept_head != NULL;
797 if (data) {
798 mappass->reqcopy.cmd = 0;
799 ret = 0;
800 goto out;
801 }
802 spin_unlock_irqrestore(&mappass->copy_lock, flags);
803
804 /* Tell the caller we don't need to send back a notification yet */
805 return -1;
806
807out:
808 spin_unlock_irqrestore(&mappass->copy_lock, flags);
809
810 rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
811 rsp->req_id = req->req_id;
812 rsp->cmd = req->cmd;
813 rsp->u.poll.id = req->u.poll.id;
814 rsp->ret = ret;
815 return 0;
816}
817
818static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
819 struct xen_pvcalls_request *req)
820{
821 int ret = 0;
822
823 switch (req->cmd) {
824 case PVCALLS_SOCKET:
825 ret = pvcalls_back_socket(dev, req);
826 break;
827 case PVCALLS_CONNECT:
828 ret = pvcalls_back_connect(dev, req);
829 break;
830 case PVCALLS_RELEASE:
831 ret = pvcalls_back_release(dev, req);
832 break;
833 case PVCALLS_BIND:
834 ret = pvcalls_back_bind(dev, req);
835 break;
836 case PVCALLS_LISTEN:
837 ret = pvcalls_back_listen(dev, req);
838 break;
839 case PVCALLS_ACCEPT:
840 ret = pvcalls_back_accept(dev, req);
841 break;
842 case PVCALLS_POLL:
843 ret = pvcalls_back_poll(dev, req);
844 break;
845 default:
846 {
847 struct pvcalls_fedata *fedata;
848 struct xen_pvcalls_response *rsp;
849
850 fedata = dev_get_drvdata(&dev->dev);
851 rsp = RING_GET_RESPONSE(
852 &fedata->ring, fedata->ring.rsp_prod_pvt++);
853 rsp->req_id = req->req_id;
854 rsp->cmd = req->cmd;
855 rsp->ret = -ENOTSUPP;
856 break;
857 }
858 }
859 return ret;
860}
861
/*
 * Main request loop: drain the command ring, handle each request and
 * push the corresponding responses, notifying the frontend when needed.
 * Loops until RING_FINAL_CHECK_FOR_REQUESTS confirms the ring is empty
 * (closing the race with a frontend that queues a request just as we
 * finish).
 */
static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			/*
			 * Copy the request off the shared ring before use so
			 * the frontend cannot modify it while we process it.
			 */
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			/*
			 * 0 means a response was queued; non-zero means the
			 * handler will respond later, so don't push/notify.
			 */
			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}
889
890static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
891{
892 struct xenbus_device *dev = dev_id;
893 struct pvcalls_fedata *fedata = NULL;
894
895 if (dev == NULL)
896 return IRQ_HANDLED;
897
898 fedata = dev_get_drvdata(&dev->dev);
899 if (fedata == NULL)
900 return IRQ_HANDLED;
901
902 pvcalls_back_work(fedata);
903 return IRQ_HANDLED;
904}
905
906static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
907{
908 struct sock_mapping *map = sock_map;
909 struct pvcalls_ioworker *iow;
910
911 if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
912 map->sock->sk->sk_user_data != map)
913 return IRQ_HANDLED;
914
915 iow = &map->ioworker;
916
917 atomic_inc(&map->write);
918 atomic_inc(&map->io);
919 queue_work(iow->wq, &iow->register_work);
920
921 return IRQ_HANDLED;
922}
923
924static int backend_connect(struct xenbus_device *dev)
925{
926 int err, evtchn;
927 grant_ref_t ring_ref;
928 struct pvcalls_fedata *fedata = NULL;
929
930 fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
931 if (!fedata)
932 return -ENOMEM;
933
934 fedata->irq = -1;
935 err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
936 &evtchn);
937 if (err != 1) {
938 err = -EINVAL;
939 xenbus_dev_fatal(dev, err, "reading %s/event-channel",
940 dev->otherend);
941 goto error;
942 }
943
944 err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
945 if (err != 1) {
946 err = -EINVAL;
947 xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
948 dev->otherend);
949 goto error;
950 }
951
952 err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn);
953 if (err < 0)
954 goto error;
955 fedata->irq = err;
956
957 err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
958 IRQF_ONESHOT, "pvcalls-back", dev);
959 if (err < 0)
960 goto error;
961
962 err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
963 (void **)&fedata->sring);
964 if (err < 0)
965 goto error;
966
967 BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
968 fedata->dev = dev;
969
970 INIT_LIST_HEAD(&fedata->socket_mappings);
971 INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
972 sema_init(&fedata->socket_lock, 1);
973 dev_set_drvdata(&dev->dev, fedata);
974
975 down(&pvcalls_back_global.frontends_lock);
976 list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
977 up(&pvcalls_back_global.frontends_lock);
978
979 return 0;
980
981 error:
982 if (fedata->irq >= 0)
983 unbind_from_irqhandler(fedata->irq, dev);
984 if (fedata->sring != NULL)
985 xenbus_unmap_ring_vfree(dev, fedata->sring);
986 kfree(fedata);
987 return err;
988}
989
/*
 * Tear down a frontend connection: release all active and passive
 * socket mappings, unbind the command event channel, unmap the ring and
 * free the per-frontend state.
 *
 * Callers hold pvcalls_back_global.frontends_lock, which makes the
 * list_del(&fedata->list) below safe.
 */
static int backend_disconnect(struct xenbus_device *dev)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	struct radix_tree_iter iter;
	void **slot;


	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	/* Release every active (connected) socket mapping. */
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		list_del(&map->list);
		pvcalls_back_release_active(dev, fedata, map);
	}

	/* Release every passive (listening) socket mapping. */
	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
		mappass = radix_tree_deref_slot(slot);
		if (!mappass)
			continue;
		if (radix_tree_exception(mappass)) {
			/* Slot moved under us: restart this iteration step. */
			if (radix_tree_deref_retry(mappass))
				slot = radix_tree_iter_retry(&iter);
		} else {
			radix_tree_delete(&fedata->socketpass_mappings,
					  mappass->id);
			pvcalls_back_release_passive(dev, fedata, mappass);
		}
	}
	up(&fedata->socket_lock);

	unbind_from_irqhandler(fedata->irq, dev);
	xenbus_unmap_ring_vfree(dev, fedata->sring);

	list_del(&fedata->list);
	kfree(fedata);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
1031
1032static int pvcalls_back_probe(struct xenbus_device *dev,
1033 const struct xenbus_device_id *id)
1034{
1035 int err, abort;
1036 struct xenbus_transaction xbt;
1037
1038again:
1039 abort = 1;
1040
1041 err = xenbus_transaction_start(&xbt);
1042 if (err) {
1043 pr_warn("%s cannot create xenstore transaction\n", __func__);
1044 return err;
1045 }
1046
1047 err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
1048 PVCALLS_VERSIONS);
1049 if (err) {
1050 pr_warn("%s write out 'versions' failed\n", __func__);
1051 goto abort;
1052 }
1053
1054 err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
1055 MAX_RING_ORDER);
1056 if (err) {
1057 pr_warn("%s write out 'max-page-order' failed\n", __func__);
1058 goto abort;
1059 }
1060
1061 err = xenbus_printf(xbt, dev->nodename, "function-calls",
1062 XENBUS_FUNCTIONS_CALLS);
1063 if (err) {
1064 pr_warn("%s write out 'function-calls' failed\n", __func__);
1065 goto abort;
1066 }
1067
1068 abort = 0;
1069abort:
1070 err = xenbus_transaction_end(xbt, abort);
1071 if (err) {
1072 if (err == -EAGAIN && !abort)
1073 goto again;
1074 pr_warn("%s cannot complete xenstore transaction\n", __func__);
1075 return err;
1076 }
1077
1078 if (abort)
1079 return -EFAULT;
1080
1081 xenbus_switch_state(dev, XenbusStateInitWait);
1082
1083 return 0;
1084}
1085
/*
 * Walk the xenbus state machine one legal transition at a time until
 * dev->state reaches the requested target state, connecting or
 * disconnecting the frontend as required along the way. Illegal
 * (state, target) combinations trigger a WARN.
 */
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				/* Frontend is ready: map ring, bind evtchn. */
				backend_connect(dev);
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				/*
				 * frontends_lock serializes against module
				 * exit walking the frontends list.
				 */
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				WARN_ON(1);
			}
			break;
		default:
			WARN_ON(1);
		}
	}
}
1149
1150static void pvcalls_back_changed(struct xenbus_device *dev,
1151 enum xenbus_state frontend_state)
1152{
1153 switch (frontend_state) {
1154 case XenbusStateInitialising:
1155 set_backend_state(dev, XenbusStateInitWait);
1156 break;
1157
1158 case XenbusStateInitialised:
1159 case XenbusStateConnected:
1160 set_backend_state(dev, XenbusStateConnected);
1161 break;
1162
1163 case XenbusStateClosing:
1164 set_backend_state(dev, XenbusStateClosing);
1165 break;
1166
1167 case XenbusStateClosed:
1168 set_backend_state(dev, XenbusStateClosed);
1169 if (xenbus_dev_is_online(dev))
1170 break;
1171 device_unregister(&dev->dev);
1172 break;
1173 case XenbusStateUnknown:
1174 set_backend_state(dev, XenbusStateClosed);
1175 device_unregister(&dev->dev);
1176 break;
1177
1178 default:
1179 xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
1180 frontend_state);
1181 break;
1182 }
1183}
1184
/*
 * Device removal hook: nothing to do here, teardown happens in
 * backend_disconnect() driven by the state machine.
 */
static int pvcalls_back_remove(struct xenbus_device *dev)
{
	return 0;
}
1189
/* Uevent hook: no extra environment variables to add. */
static int pvcalls_back_uevent(struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}
1195
/* Xenbus device ids this backend binds to ("" terminates the table). */
static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

/* Xenbus backend driver glue. */
static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};
1208
1209static int __init pvcalls_back_init(void)
1210{
1211 int ret;
1212
1213 if (!xen_domain())
1214 return -ENODEV;
1215
1216 ret = xenbus_register_backend(&pvcalls_back_driver);
1217 if (ret < 0)
1218 return ret;
1219
1220 sema_init(&pvcalls_back_global.frontends_lock, 1);
1221 INIT_LIST_HEAD(&pvcalls_back_global.frontends);
1222 return 0;
1223}
1224module_init(pvcalls_back_init);
1225
/*
 * Module exit: disconnect every remaining frontend (under the global
 * lock; backend_disconnect removes each entry from the list, hence the
 * _safe iterator), then unregister the driver.
 */
static void __exit pvcalls_back_fin(void)
{
	struct pvcalls_fedata *fedata, *nfedata;

	down(&pvcalls_back_global.frontends_lock);
	list_for_each_entry_safe(fedata, nfedata,
				 &pvcalls_back_global.frontends, list) {
		backend_disconnect(fedata->dev);
	}
	up(&pvcalls_back_global.frontends_lock);

	xenbus_unregister_driver(&pvcalls_back_driver);
}

module_exit(pvcalls_back_fin);
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index b70a38b7fa84..1b4fed72f573 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -149,24 +149,6 @@ DECLARE_EVENT_CLASS(xen_mmu__set_pte,
149DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte); 149DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte);
150DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic); 150DEFINE_XEN_MMU_SET_PTE(xen_mmu_set_pte_atomic);
151 151
152TRACE_EVENT(xen_mmu_set_domain_pte,
153 TP_PROTO(pte_t *ptep, pte_t pteval, unsigned domid),
154 TP_ARGS(ptep, pteval, domid),
155 TP_STRUCT__entry(
156 __field(pte_t *, ptep)
157 __field(pteval_t, pteval)
158 __field(unsigned, domid)
159 ),
160 TP_fast_assign(__entry->ptep = ptep;
161 __entry->pteval = pteval.pte;
162 __entry->domid = domid),
163 TP_printk("ptep %p pteval %0*llx (raw %0*llx) domid %u",
164 __entry->ptep,
165 (int)sizeof(pteval_t) * 2, (unsigned long long)pte_val(native_make_pte(__entry->pteval)),
166 (int)sizeof(pteval_t) * 2, (unsigned long long)__entry->pteval,
167 __entry->domid)
168 );
169
170TRACE_EVENT(xen_mmu_set_pte_at, 152TRACE_EVENT(xen_mmu_set_pte_at,
171 TP_PROTO(struct mm_struct *mm, unsigned long addr, 153 TP_PROTO(struct mm_struct *mm, unsigned long addr,
172 pte_t *ptep, pte_t pteval), 154 pte_t *ptep, pte_t pteval),
@@ -266,16 +248,6 @@ TRACE_EVENT(xen_mmu_set_p4d,
266 (int)sizeof(p4dval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->p4dval)), 248 (int)sizeof(p4dval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->p4dval)),
267 (int)sizeof(p4dval_t) * 2, (unsigned long long)__entry->p4dval) 249 (int)sizeof(p4dval_t) * 2, (unsigned long long)__entry->p4dval)
268 ); 250 );
269
270TRACE_EVENT(xen_mmu_pud_clear,
271 TP_PROTO(pud_t *pudp),
272 TP_ARGS(pudp),
273 TP_STRUCT__entry(
274 __field(pud_t *, pudp)
275 ),
276 TP_fast_assign(__entry->pudp = pudp),
277 TP_printk("pudp %p", __entry->pudp)
278 );
279#else 251#else
280 252
281TRACE_EVENT(xen_mmu_set_pud, 253TRACE_EVENT(xen_mmu_set_pud,
@@ -295,16 +267,6 @@ TRACE_EVENT(xen_mmu_set_pud,
295 267
296#endif 268#endif
297 269
298TRACE_EVENT(xen_mmu_pgd_clear,
299 TP_PROTO(pgd_t *pgdp),
300 TP_ARGS(pgdp),
301 TP_STRUCT__entry(
302 __field(pgd_t *, pgdp)
303 ),
304 TP_fast_assign(__entry->pgdp = pgdp),
305 TP_printk("pgdp %p", __entry->pgdp)
306 );
307
308DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot, 270DECLARE_EVENT_CLASS(xen_mmu_ptep_modify_prot,
309 TP_PROTO(struct mm_struct *mm, unsigned long addr, 271 TP_PROTO(struct mm_struct *mm, unsigned long addr,
310 pte_t *ptep, pte_t pteval), 272 pte_t *ptep, pte_t pteval),
diff --git a/include/xen/interface/io/pvcalls.h b/include/xen/interface/io/pvcalls.h
new file mode 100644
index 000000000000..ccf97b817e72
--- /dev/null
+++ b/include/xen/interface/io/pvcalls.h
@@ -0,0 +1,121 @@
1#ifndef __XEN_PUBLIC_IO_XEN_PVCALLS_H__
2#define __XEN_PUBLIC_IO_XEN_PVCALLS_H__
3
4#include <linux/net.h>
5#include <xen/interface/io/ring.h>
6#include <xen/interface/grant_table.h>
7
8/* "1" means socket, connect, release, bind, listen, accept and poll */
9#define XENBUS_FUNCTIONS_CALLS "1"
10
11/*
12 * See docs/misc/pvcalls.markdown in xen.git for the full specification:
13 * https://xenbits.xen.org/docs/unstable/misc/pvcalls.html
14 */
15struct pvcalls_data_intf {
16 RING_IDX in_cons, in_prod, in_error;
17
18 uint8_t pad1[52];
19
20 RING_IDX out_cons, out_prod, out_error;
21
22 uint8_t pad2[52];
23
24 RING_IDX ring_order;
25 grant_ref_t ref[];
26};
27DEFINE_XEN_FLEX_RING(pvcalls);
28
29#define PVCALLS_SOCKET 0
30#define PVCALLS_CONNECT 1
31#define PVCALLS_RELEASE 2
32#define PVCALLS_BIND 3
33#define PVCALLS_LISTEN 4
34#define PVCALLS_ACCEPT 5
35#define PVCALLS_POLL 6
36
37struct xen_pvcalls_request {
38 uint32_t req_id; /* private to guest, echoed in response */
39 uint32_t cmd; /* command to execute */
40 union {
41 struct xen_pvcalls_socket {
42 uint64_t id;
43 uint32_t domain;
44 uint32_t type;
45 uint32_t protocol;
46 } socket;
47 struct xen_pvcalls_connect {
48 uint64_t id;
49 uint8_t addr[28];
50 uint32_t len;
51 uint32_t flags;
52 grant_ref_t ref;
53 uint32_t evtchn;
54 } connect;
55 struct xen_pvcalls_release {
56 uint64_t id;
57 uint8_t reuse;
58 } release;
59 struct xen_pvcalls_bind {
60 uint64_t id;
61 uint8_t addr[28];
62 uint32_t len;
63 } bind;
64 struct xen_pvcalls_listen {
65 uint64_t id;
66 uint32_t backlog;
67 } listen;
68 struct xen_pvcalls_accept {
69 uint64_t id;
70 uint64_t id_new;
71 grant_ref_t ref;
72 uint32_t evtchn;
73 } accept;
74 struct xen_pvcalls_poll {
75 uint64_t id;
76 } poll;
77 /* dummy member to force sizeof(struct xen_pvcalls_request)
78 * to match across archs */
79 struct xen_pvcalls_dummy {
80 uint8_t dummy[56];
81 } dummy;
82 } u;
83};
84
85struct xen_pvcalls_response {
86 uint32_t req_id;
87 uint32_t cmd;
88 int32_t ret;
89 uint32_t pad;
90 union {
91 struct _xen_pvcalls_socket {
92 uint64_t id;
93 } socket;
94 struct _xen_pvcalls_connect {
95 uint64_t id;
96 } connect;
97 struct _xen_pvcalls_release {
98 uint64_t id;
99 } release;
100 struct _xen_pvcalls_bind {
101 uint64_t id;
102 } bind;
103 struct _xen_pvcalls_listen {
104 uint64_t id;
105 } listen;
106 struct _xen_pvcalls_accept {
107 uint64_t id;
108 } accept;
109 struct _xen_pvcalls_poll {
110 uint64_t id;
111 } poll;
112 struct _xen_pvcalls_dummy {
113 uint8_t dummy[8];
114 } dummy;
115 } u;
116};
117
118DEFINE_RING_TYPES(xen_pvcalls, struct xen_pvcalls_request,
119 struct xen_pvcalls_response);
120
121#endif
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index c79456855539..e547088ceb0e 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -9,6 +9,8 @@
9#ifndef __XEN_PUBLIC_IO_RING_H__ 9#ifndef __XEN_PUBLIC_IO_RING_H__
10#define __XEN_PUBLIC_IO_RING_H__ 10#define __XEN_PUBLIC_IO_RING_H__
11 11
12#include <xen/interface/grant_table.h>
13
12typedef unsigned int RING_IDX; 14typedef unsigned int RING_IDX;
13 15
14/* Round a 32-bit unsigned constant down to the nearest power of two. */ 16/* Round a 32-bit unsigned constant down to the nearest power of two. */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 6e8b7fc79801..28c59ca529d7 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -13,11 +13,16 @@ extern enum xen_domain_type xen_domain_type;
13#define xen_domain_type XEN_NATIVE 13#define xen_domain_type XEN_NATIVE
14#endif 14#endif
15 15
16#ifdef CONFIG_XEN_PVH
17extern bool xen_pvh;
18#else
19#define xen_pvh 0
20#endif
21
16#define xen_domain() (xen_domain_type != XEN_NATIVE) 22#define xen_domain() (xen_domain_type != XEN_NATIVE)
17#define xen_pv_domain() (xen_domain() && \ 23#define xen_pv_domain() (xen_domain_type == XEN_PV_DOMAIN)
18 xen_domain_type == XEN_PV_DOMAIN) 24#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
19#define xen_hvm_domain() (xen_domain() && \ 25#define xen_pvh_domain() (xen_pvh)
20 xen_domain_type == XEN_HVM_DOMAIN)
21 26
22#ifdef CONFIG_XEN_DOM0 27#ifdef CONFIG_XEN_DOM0
23#include <xen/interface/xen.h> 28#include <xen/interface/xen.h>
@@ -29,11 +34,4 @@ extern enum xen_domain_type xen_domain_type;
29#define xen_initial_domain() (0) 34#define xen_initial_domain() (0)
30#endif /* CONFIG_XEN_DOM0 */ 35#endif /* CONFIG_XEN_DOM0 */
31 36
32#ifdef CONFIG_XEN_PVH
33extern bool xen_pvh;
34#define xen_pvh_domain() (xen_hvm_domain() && xen_pvh)
35#else
36#define xen_pvh_domain() (0)
37#endif
38
39#endif /* _XEN_XEN_H */ 37#endif /* _XEN_XEN_H */