author	Stefano Stabellini <sstabellini@kernel.org>	2017-07-06 14:01:07 -0400
committer	Boris Ostrovsky <boris.ostrovsky@oracle.com>	2017-08-31 09:45:55 -0400
commit	6f474e711617d00ef3be31f454301da00d0eb5ac (patch)
tree	2e1c7f8a9f2b7ca0b3efd4fee3cac0f1c9f45206
parent	8ce3f7626f96b985142c5e93f9f2dc338b0c21bf (diff)
xen/pvcalls: implement accept command
Implement the accept command by calling inet_accept. To avoid blocking
in the kernel, call inet_accept(O_NONBLOCK) from a workqueue, which gets
scheduled on sk_data_ready (for a passive socket, it means that there
are connections to accept).

Use the reqcopy field to store the request. Accept the new socket from
the delayed work function, create a new sock_mapping for it, map the
indexes page and data ring, and reply to the other end. Allocate an
ioworker for the socket.

Only support one outstanding blocking accept request for every socket
at any time.

Add a field to sock_mapping to remember the passive socket from which
an active socket was created.

[ boris: fixed whitespaces ]

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-rw-r--r--	drivers/xen/pvcalls-back.c	113
1 file changed, 113 insertions(+), 0 deletions(-)
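The core of the patch is the pattern the message describes: the request
handler copies the request into reqcopy under a spinlock (using a non-zero
cmd as a "slot busy" flag) and queues deferred work; the worker re-checks
cmd under the same lock, performs the accept non-blocking so it never
sleeps in the kernel, and clears the slot once it has replied. As a rough
userspace analogue of that pattern (illustrative only: passive_sock,
submit_accept, and accept_worker are invented for this sketch, with a
pthread mutex and condition variable standing in for the kernel's spinlock
and workqueue):

/*
 * Userspace analogue of the backend's deferred-accept pattern; a sketch
 * only, not kernel code. The reqcmd field plays the role of
 * mappass->reqcopy.cmd: non-zero means "one request already pending".
 */
#define _GNU_SOURCE		/* for accept4() */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#define CMD_NONE	0
#define CMD_ACCEPT	1

struct passive_sock {			/* analogue of sockpass_mapping */
	int fd;				/* listening socket */
	int reqcmd;			/* analogue of reqcopy.cmd */
	pthread_mutex_t copy_lock;
	pthread_cond_t kick;		/* stands in for queue_work() */
};

/* Analogue of __pvcalls_back_accept: never blocks inside accept. */
static void *accept_worker(void *arg)
{
	struct passive_sock *ps = arg;

	for (;;) {
		/* Re-check the command under the lock, as the patch does. */
		pthread_mutex_lock(&ps->copy_lock);
		while (ps->reqcmd != CMD_ACCEPT)
			pthread_cond_wait(&ps->kick, &ps->copy_lock);
		pthread_mutex_unlock(&ps->copy_lock);

		/* Non-blocking accept, like inet_accept(..., O_NONBLOCK). */
		int newfd = accept4(ps->fd, NULL, NULL, SOCK_NONBLOCK);
		if (newfd >= 0) {
			printf("accepted fd %d\n", newfd);
			close(newfd);
		}

		/* Free the slot, like "mappass->reqcopy.cmd = 0". */
		pthread_mutex_lock(&ps->copy_lock);
		ps->reqcmd = CMD_NONE;
		pthread_mutex_unlock(&ps->copy_lock);
	}
	return NULL;
}

/* Analogue of pvcalls_back_accept: one outstanding request per socket. */
static int submit_accept(struct passive_sock *ps)
{
	pthread_mutex_lock(&ps->copy_lock);
	if (ps->reqcmd != CMD_NONE) {	/* slot busy: like replying -EINTR */
		pthread_mutex_unlock(&ps->copy_lock);
		return -1;
	}
	ps->reqcmd = CMD_ACCEPT;
	pthread_cond_signal(&ps->kick);	/* like queue_work(mappass->wq, ...) */
	pthread_mutex_unlock(&ps->copy_lock);
	return 0;
}

int main(void)
{
	struct passive_sock ps = {
		.reqcmd = CMD_NONE,
		.copy_lock = PTHREAD_MUTEX_INITIALIZER,
		.kick = PTHREAD_COND_INITIALIZER,
	};
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	pthread_t worker;

	ps.fd = socket(AF_INET, SOCK_STREAM, 0);
	bind(ps.fd, (struct sockaddr *)&addr, sizeof(addr));
	listen(ps.fd, 8);

	pthread_create(&worker, NULL, accept_worker, &ps);
	submit_accept(&ps);	/* queue one accept request */
	sleep(1);		/* demo only: give the worker a chance */
	return 0;
}

The single reqcmd slot is what limits each passive socket to one
outstanding accept at a time: a second submit_accept() while the slot is
busy fails immediately, just as the patch replies -EINTR when
reqcopy.cmd is already set.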
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 648fa75f3cc2..9a4bdc5e5d34 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -62,6 +62,7 @@ struct pvcalls_ioworker {
 struct sock_mapping {
 	struct list_head list;
 	struct pvcalls_fedata *fedata;
+	struct sockpass_mapping *sockpass;
 	struct socket *sock;
 	uint64_t id;
 	grant_ref_t ref;
@@ -282,10 +283,83 @@ static int pvcalls_back_release(struct xenbus_device *dev,
 
 static void __pvcalls_back_accept(struct work_struct *work)
 {
+	struct sockpass_mapping *mappass = container_of(
+		work, struct sockpass_mapping, register_work);
+	struct sock_mapping *map;
+	struct pvcalls_ioworker *iow;
+	struct pvcalls_fedata *fedata;
+	struct socket *sock;
+	struct xen_pvcalls_response *rsp;
+	struct xen_pvcalls_request *req;
+	int notify;
+	int ret = -EINVAL;
+	unsigned long flags;
+
+	fedata = mappass->fedata;
+	/*
+	 * __pvcalls_back_accept can race against pvcalls_back_accept.
+	 * We only need to check the value of "cmd" on read. It could be
+	 * done atomically, but to simplify the code on the write side, we
+	 * use a spinlock.
+	 */
+	spin_lock_irqsave(&mappass->copy_lock, flags);
+	req = &mappass->reqcopy;
+	if (req->cmd != PVCALLS_ACCEPT) {
+		spin_unlock_irqrestore(&mappass->copy_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&mappass->copy_lock, flags);
+
+	sock = sock_alloc();
+	if (sock == NULL)
+		goto out_error;
+	sock->type = mappass->sock->type;
+	sock->ops = mappass->sock->ops;
+
+	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
+	if (ret == -EAGAIN) {
+		sock_release(sock);
+		goto out_error;
+	}
+
+	map = pvcalls_new_active_socket(fedata,
+					req->u.accept.id_new,
+					req->u.accept.ref,
+					req->u.accept.evtchn,
+					sock);
+	if (!map) {
+		ret = -EFAULT;
+		sock_release(sock);
+		goto out_error;
+	}
+
+	map->sockpass = mappass;
+	iow = &map->ioworker;
+	atomic_inc(&map->read);
+	atomic_inc(&map->io);
+	queue_work(iow->wq, &iow->register_work);
+
+out_error:
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.accept.id = req->u.accept.id;
+	rsp->ret = ret;
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
+	if (notify)
+		notify_remote_via_irq(fedata->irq);
+
+	mappass->reqcopy.cmd = 0;
 }
 
 static void pvcalls_pass_sk_data_ready(struct sock *sock)
 {
+	struct sockpass_mapping *mappass = sock->sk_user_data;
+
+	if (mappass == NULL)
+		return;
+
+	queue_work(mappass->wq, &mappass->register_work);
 }
 
 static int pvcalls_back_bind(struct xenbus_device *dev,
@@ -383,6 +457,45 @@ out:
 static int pvcalls_back_accept(struct xenbus_device *dev,
 				struct xen_pvcalls_request *req)
 {
+	struct pvcalls_fedata *fedata;
+	struct sockpass_mapping *mappass;
+	int ret = -EINVAL;
+	struct xen_pvcalls_response *rsp;
+	unsigned long flags;
+
+	fedata = dev_get_drvdata(&dev->dev);
+
+	down(&fedata->socket_lock);
+	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
+		req->u.accept.id);
+	up(&fedata->socket_lock);
+	if (mappass == NULL)
+		goto out_error;
+
+	/*
+	 * Limitation of the current implementation: only support one
+	 * concurrent accept or poll call on one socket.
+	 */
+	spin_lock_irqsave(&mappass->copy_lock, flags);
+	if (mappass->reqcopy.cmd != 0) {
+		spin_unlock_irqrestore(&mappass->copy_lock, flags);
+		ret = -EINTR;
+		goto out_error;
+	}
+
+	mappass->reqcopy = *req;
+	spin_unlock_irqrestore(&mappass->copy_lock, flags);
+	queue_work(mappass->wq, &mappass->register_work);
+
+	/* Tell the caller we don't need to send back a notification yet */
+	return -1;
+
+out_error:
+	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
+	rsp->req_id = req->req_id;
+	rsp->cmd = req->cmd;
+	rsp->u.accept.id = req->u.accept.id;
+	rsp->ret = ret;
 	return 0;
 }
 
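Why does pvcalls_back_accept return -1 instead of 0 on the deferred path?
The comment in the patch ("Tell the caller we don't need to send back a
notification yet") refers to a convention with the ring dispatch loop:
only handlers that return 0 get their response pushed and the frontend
notified immediately. A minimal sketch of a dispatcher honouring that
convention (dispatch_one_request is a made-up name for illustration; in
the driver, pvcalls_back_work() calling pvcalls_back_handle_cmd() plays
this role):

/*
 * Illustrative sketch, not part of the patch: a handler returning 0 has
 * already written its response into the ring, so it is pushed here; a
 * handler returning -1 (as pvcalls_back_accept does above) has deferred
 * the reply to a work item, which pushes and notifies by itself.
 */
static void dispatch_one_request(struct pvcalls_fedata *fedata,
				 struct xenbus_device *dev)
{
	struct xen_pvcalls_request req;
	int notify;

	RING_COPY_REQUEST(&fedata->ring, fedata->ring.req_cons++, &req);
	if (pvcalls_back_handle_cmd(dev, &req) != 0)
		return;	/* reply deferred, e.g. to __pvcalls_back_accept */

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);
}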