author     Linus Torvalds <torvalds@linux-foundation.org>  2012-12-20 11:37:04 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-20 11:37:05 -0500
commit     b7dfde956daee23f4439d0c8562a5e38b43e79d9
tree       2ed71fb5c5eac6957fd1e1ad0a67be6c3282167a  /drivers/char/virtio_console.c
parent     03c850ec327c42a97e44c448b75983e12da417d9
parent     1b6370463e88b0c1c317de16d7b962acc1dab4f2
Merge tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux
Pull virtio update from Rusty Russell:
"Some nice cleanups, and even a patch my wife did as a "live" demo for
Latinoware 2012.
There's a slightly non-trivial merge in virtio-net, as we cleaned up
the virtio add_buf interface while DaveM accepted the mq virtio-net
patches."
* tag 'virtio-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (27 commits)
virtio_console: Add support for remoteproc serial
virtio_console: Merge struct buffer_token into struct port_buffer
virtio: add drv_to_virtio to make code clearly
virtio: use dev_to_virtio wrapper in virtio
virtio-mmio: Fix irq parsing in command line parameter
virtio_console: Free buffers from out-queue upon close
virtio: Convert dev_printk(KERN_<LEVEL> to dev_<level>(
virtio_console: Use kmalloc instead of kzalloc
virtio_console: Free buffer if splice fails
virtio: tools: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: scsi: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: rpmsg: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: net: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: console: make it clear that virtqueue_add_buf() no longer returns > 0
virtio: make virtqueue_add_buf() returning 0 on success, not capacity.
virtio: console: don't rely on virtqueue_add_buf() returning capacity.
virtio_net: don't rely on virtqueue_add_buf() returning capacity.
virtio-net: remove unused skb_vnet_hdr->num_sg field
virtio-net: correct capacity math on ring full
virtio: move queue_index and num_free fields into core struct virtqueue.
...
Diffstat (limited to 'drivers/char/virtio_console.c')
-rw-r--r--  drivers/char/virtio_console.c | 329
1 file changed, 230 insertions(+), 99 deletions(-)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 90493d4ead1f..c594cb16c37b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -37,8 +37,12 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/kconfig.h>
 #include "../tty/hvc/hvc_console.h"
 
+#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
+
 /*
  * This is a global struct for storing common data for all the devices
  * this driver handles.
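
The new is_rproc_enabled macro uses IS_ENABLED(), which expands to 1 when CONFIG_REMOTEPROC is built-in or modular and to 0 otherwise, so rproc-only branches can be written as ordinary C and constant-folded away when remoteproc is configured out. A hedged illustration of the pattern; maybe_log_rproc is a made-up example, not from the patch:

#include <linux/kconfig.h>
#include <linux/device.h>

#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)

static void maybe_log_rproc(struct device *dev)
{
        /* With CONFIG_REMOTEPROC unset this branch is dead code and is
         * dropped by the compiler, yet it is still type-checked. */
        if (is_rproc_enabled)
                dev_info(dev, "remoteproc support compiled in\n");
}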
@@ -111,6 +115,21 @@ struct port_buffer {
 	size_t len;
 	/* offset in the buf from which to consume data */
 	size_t offset;
+
+	/* DMA address of buffer */
+	dma_addr_t dma;
+
+	/* Device we got DMA memory from */
+	struct device *dev;
+
+	/* List of pending dma buffers to free */
+	struct list_head list;
+
+	/* If sgpages == 0 then buf is used */
+	unsigned int sgpages;
+
+	/* sg is used if spages > 0. sg must be the last in is struct */
+	struct scatterlist sg[0];
 };
 
 /*
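
The sg[0] member added at the end of struct port_buffer is a zero-length (trailing) array: the scatterlist entries live in the same allocation as the struct, sized by the pages argument later passed to alloc_buf(). A short sketch of that allocation arithmetic, assuming the struct layout shown above; alloc_buf_with_sg is an illustrative name:

#include <linux/slab.h>
#include <linux/scatterlist.h>

static struct port_buffer *alloc_buf_with_sg(int pages)
{
        struct port_buffer *buf;

        /* one kmalloc covers the struct plus 'pages' trailing sg entries */
        buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
                      GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->sgpages = pages;
        if (pages > 0)
                sg_init_table(buf->sg, pages);
        return buf;
}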
@@ -325,6 +344,11 @@ static bool is_console_port(struct port *port)
 	return false;
 }
 
+static bool is_rproc_serial(const struct virtio_device *vdev)
+{
+	return is_rproc_enabled && vdev->id.device == VIRTIO_ID_RPROC_SERIAL;
+}
+
 static inline bool use_multiport(struct ports_device *portdev)
 {
 	/*
@@ -336,20 +360,110 @@ static inline bool use_multiport(struct ports_device *portdev)
 	return portdev->vdev->features[0] & (1 << VIRTIO_CONSOLE_F_MULTIPORT);
 }
 
-static void free_buf(struct port_buffer *buf)
+static DEFINE_SPINLOCK(dma_bufs_lock);
+static LIST_HEAD(pending_free_dma_bufs);
+
+static void free_buf(struct port_buffer *buf, bool can_sleep)
 {
-	kfree(buf->buf);
+	unsigned int i;
+
+	for (i = 0; i < buf->sgpages; i++) {
+		struct page *page = sg_page(&buf->sg[i]);
+		if (!page)
+			break;
+		put_page(page);
+	}
+
+	if (!buf->dev) {
+		kfree(buf->buf);
+	} else if (is_rproc_enabled) {
+		unsigned long flags;
+
+		/* dma_free_coherent requires interrupts to be enabled. */
+		if (!can_sleep) {
+			/* queue up dma-buffers to be freed later */
+			spin_lock_irqsave(&dma_bufs_lock, flags);
+			list_add_tail(&buf->list, &pending_free_dma_bufs);
+			spin_unlock_irqrestore(&dma_bufs_lock, flags);
+			return;
+		}
+		dma_free_coherent(buf->dev, buf->size, buf->buf, buf->dma);
+
+		/* Release device refcnt and allow it to be freed */
+		put_device(buf->dev);
+	}
+
 	kfree(buf);
 }
 
-static struct port_buffer *alloc_buf(size_t buf_size)
+static void reclaim_dma_bufs(void)
+{
+	unsigned long flags;
+	struct port_buffer *buf, *tmp;
+	LIST_HEAD(tmp_list);
+
+	if (list_empty(&pending_free_dma_bufs))
+		return;
+
+	/* Create a copy of the pending_free_dma_bufs while holding the lock */
+	spin_lock_irqsave(&dma_bufs_lock, flags);
+	list_cut_position(&tmp_list, &pending_free_dma_bufs,
+			  pending_free_dma_bufs.prev);
+	spin_unlock_irqrestore(&dma_bufs_lock, flags);
+
+	/* Release the dma buffers, without irqs enabled */
+	list_for_each_entry_safe(buf, tmp, &tmp_list, list) {
+		list_del(&buf->list);
+		free_buf(buf, true);
+	}
+}
+
+static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
+				     int pages)
 {
 	struct port_buffer *buf;
 
-	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+	reclaim_dma_bufs();
+
+	/*
+	 * Allocate buffer and the sg list. The sg list array is allocated
+	 * directly after the port_buffer struct.
+	 */
+	buf = kmalloc(sizeof(*buf) + sizeof(struct scatterlist) * pages,
+		      GFP_KERNEL);
 	if (!buf)
 		goto fail;
-	buf->buf = kzalloc(buf_size, GFP_KERNEL);
+
+	buf->sgpages = pages;
+	if (pages > 0) {
+		buf->dev = NULL;
+		buf->buf = NULL;
+		return buf;
+	}
+
+	if (is_rproc_serial(vq->vdev)) {
+		/*
+		 * Allocate DMA memory from ancestor. When a virtio
+		 * device is created by remoteproc, the DMA memory is
+		 * associated with the grandparent device:
+		 * vdev => rproc => platform-dev.
+		 * The code here would have been less quirky if
+		 * DMA_MEMORY_INCLUDES_CHILDREN had been supported
+		 * in dma-coherent.c
+		 */
+		if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
+			goto free_buf;
+		buf->dev = vq->vdev->dev.parent->parent;
+
+		/* Increase device refcnt to avoid freeing it */
+		get_device(buf->dev);
+		buf->buf = dma_alloc_coherent(buf->dev, buf_size, &buf->dma,
+					      GFP_KERNEL);
+	} else {
+		buf->dev = NULL;
+		buf->buf = kmalloc(buf_size, GFP_KERNEL);
+	}
+
 	if (!buf->buf)
 		goto free_buf;
 	buf->len = 0;
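
In the hunk above, free_buf() cannot call dma_free_coherent() from atomic context, so DMA buffers freed with can_sleep == false are parked on pending_free_dma_bufs under a spinlock and released later by reclaim_dma_bufs(). The same deferred-release pattern in isolation, as a hedged sketch with illustrative names (deferred_item, defer_release, reclaim_deferred), not code from the patch:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct deferred_item {
        struct list_head list;
        /* ... resource that needs a sleepable context to release ... */
};

static DEFINE_SPINLOCK(deferred_lock);
static LIST_HEAD(deferred_items);

/* May be called from atomic context: only queues the item. */
static void defer_release(struct deferred_item *item)
{
        unsigned long flags;

        spin_lock_irqsave(&deferred_lock, flags);
        list_add_tail(&item->list, &deferred_items);
        spin_unlock_irqrestore(&deferred_lock, flags);
}

/* Called later from process context: detach the list, free outside the lock. */
static void reclaim_deferred(void)
{
        struct deferred_item *item, *tmp;
        unsigned long flags;
        LIST_HEAD(tmp_list);

        spin_lock_irqsave(&deferred_lock, flags);
        list_splice_init(&deferred_items, &tmp_list);
        spin_unlock_irqrestore(&deferred_lock, flags);

        list_for_each_entry_safe(item, tmp, &tmp_list, list) {
                list_del(&item->list);
                kfree(item);    /* stand-in for the sleepable release */
        }
}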
@@ -396,6 +510,8 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
 
 	ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
 	virtqueue_kick(vq);
+	if (!ret)
+		ret = vq->num_free;
 	return ret;
 }
 
@@ -416,7 +532,7 @@ static void discard_port_data(struct port *port)
 		port->stats.bytes_discarded += buf->len - buf->offset;
 		if (add_inbuf(port->in_vq, buf) < 0) {
 			err++;
-			free_buf(buf);
+			free_buf(buf, false);
 		}
 		port->inbuf = NULL;
 		buf = get_inbuf(port);
@@ -459,7 +575,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
 	vq = portdev->c_ovq;
 
 	sg_init_one(sg, &cpkt, sizeof(cpkt));
-	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
+	if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
 		virtqueue_kick(vq);
 		while (!virtqueue_get_buf(vq, &len))
 			cpu_relax();
@@ -476,55 +592,29 @@ static ssize_t send_control_msg(struct port *port, unsigned int event,
 	return 0;
 }
 
-struct buffer_token {
-	union {
-		void *buf;
-		struct scatterlist *sg;
-	} u;
-	/* If sgpages == 0 then buf is used, else sg is used */
-	unsigned int sgpages;
-};
-
-static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages)
-{
-	int i;
-	struct page *page;
-
-	for (i = 0; i < nrpages; i++) {
-		page = sg_page(&sg[i]);
-		if (!page)
-			break;
-		put_page(page);
-	}
-	kfree(sg);
-}
 
 /* Callers must take the port->outvq_lock */
 static void reclaim_consumed_buffers(struct port *port)
 {
-	struct buffer_token *tok;
+	struct port_buffer *buf;
 	unsigned int len;
 
 	if (!port->portdev) {
 		/* Device has been unplugged. vqs are already gone. */
 		return;
 	}
-	while ((tok = virtqueue_get_buf(port->out_vq, &len))) {
-		if (tok->sgpages)
-			reclaim_sg_pages(tok->u.sg, tok->sgpages);
-		else
-			kfree(tok->u.buf);
-		kfree(tok);
+	while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
+		free_buf(buf, false);
 		port->outvq_full = false;
 	}
 }
 
 static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
                               int nents, size_t in_count,
-                              struct buffer_token *tok, bool nonblock)
+                              void *data, bool nonblock)
 {
 	struct virtqueue *out_vq;
-	ssize_t ret;
+	int err;
 	unsigned long flags;
 	unsigned int len;
 
@@ -534,17 +624,17 @@ static ssize_t __send_to_port(struct port *port, struct scatterlist *sg,
 
 	reclaim_consumed_buffers(port);
 
-	ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC);
+	err = virtqueue_add_buf(out_vq, sg, nents, 0, data, GFP_ATOMIC);
 
 	/* Tell Host to go! */
 	virtqueue_kick(out_vq);
 
-	if (ret < 0) {
+	if (err) {
 		in_count = 0;
 		goto done;
 	}
 
-	if (ret == 0)
+	if (out_vq->num_free == 0)
 		port->outvq_full = true;
 
 	if (nonblock)
@@ -572,37 +662,6 @@ done:
 	return in_count;
 }
 
-static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
-                        bool nonblock)
-{
-	struct scatterlist sg[1];
-	struct buffer_token *tok;
-
-	tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
-	if (!tok)
-		return -ENOMEM;
-	tok->sgpages = 0;
-	tok->u.buf = in_buf;
-
-	sg_init_one(sg, in_buf, in_count);
-
-	return __send_to_port(port, sg, 1, in_count, tok, nonblock);
-}
-
-static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents,
-                          size_t in_count, bool nonblock)
-{
-	struct buffer_token *tok;
-
-	tok = kmalloc(sizeof(*tok), GFP_ATOMIC);
-	if (!tok)
-		return -ENOMEM;
-	tok->sgpages = nents;
-	tok->u.sg = sg;
-
-	return __send_to_port(port, sg, nents, in_count, tok, nonblock);
-}
-
 /*
  * Give out the data that's requested from the buffer that we have
  * queued up.
@@ -748,9 +807,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
                                size_t count, loff_t *offp)
 {
 	struct port *port;
-	char *buf;
+	struct port_buffer *buf;
 	ssize_t ret;
 	bool nonblock;
+	struct scatterlist sg[1];
 
 	/* Userspace could be out to fool us */
 	if (!count)
@@ -766,11 +826,11 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 
 	count = min((size_t)(32 * 1024), count);
 
-	buf = kmalloc(count, GFP_KERNEL);
+	buf = alloc_buf(port->out_vq, count, 0);
 	if (!buf)
 		return -ENOMEM;
 
-	ret = copy_from_user(buf, ubuf, count);
+	ret = copy_from_user(buf->buf, ubuf, count);
 	if (ret) {
 		ret = -EFAULT;
 		goto free_buf;
@@ -784,13 +844,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
 	 * through to the host.
 	 */
 	nonblock = true;
-	ret = send_buf(port, buf, count, nonblock);
+	sg_init_one(sg, buf->buf, count);
+	ret = __send_to_port(port, sg, 1, count, buf, nonblock);
 
 	if (nonblock && ret > 0)
 		goto out;
 
 free_buf:
-	kfree(buf);
+	free_buf(buf, true);
 out:
 	return ret;
 }
@@ -856,6 +917,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
 	struct port *port = filp->private_data;
 	struct sg_list sgl;
 	ssize_t ret;
+	struct port_buffer *buf;
 	struct splice_desc sd = {
 		.total_len = len,
 		.flags = flags,
@@ -863,22 +925,34 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
 		.u.data = &sgl,
 	};
 
+	/*
+	 * Rproc_serial does not yet support splice. To support splice
+	 * pipe_to_sg() must allocate dma-buffers and copy content from
+	 * regular pages to dma pages. And alloc_buf and free_buf must
+	 * support allocating and freeing such a list of dma-buffers.
+	 */
+	if (is_rproc_serial(port->out_vq->vdev))
+		return -EINVAL;
+
 	ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
 	if (ret < 0)
 		return ret;
 
+	buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
+	if (!buf)
+		return -ENOMEM;
+
 	sgl.n = 0;
 	sgl.len = 0;
 	sgl.size = pipe->nrbufs;
-	sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL);
-	if (unlikely(!sgl.sg))
-		return -ENOMEM;
-
+	sgl.sg = buf->sg;
 	sg_init_table(sgl.sg, sgl.size);
 	ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
 	if (likely(ret > 0))
-		ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true);
+		ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
 
+	if (unlikely(ret <= 0))
+		free_buf(buf, true);
 	return ret;
 }
 
@@ -927,6 +1001,7 @@ static int port_fops_release(struct inode *inode, struct file *filp)
 	reclaim_consumed_buffers(port);
 	spin_unlock_irq(&port->outvq_lock);
 
+	reclaim_dma_bufs();
 	/*
 	 * Locks aren't necessary here as a port can't be opened after
 	 * unplug, and if a port isn't unplugged, a kref would already
@@ -1031,6 +1106,7 @@ static const struct file_operations port_fops = {
 static int put_chars(u32 vtermno, const char *buf, int count)
 {
 	struct port *port;
+	struct scatterlist sg[1];
 
 	if (unlikely(early_put_chars))
 		return early_put_chars(vtermno, buf, count);
@@ -1039,7 +1115,8 @@ static int put_chars(u32 vtermno, const char *buf, int count)
 	if (!port)
 		return -EPIPE;
 
-	return send_buf(port, (void *)buf, count, false);
+	sg_init_one(sg, buf, count);
+	return __send_to_port(port, sg, 1, count, (void *)buf, false);
 }
 
 /*
@@ -1076,7 +1153,10 @@ static void resize_console(struct port *port)
 		return;
 
 	vdev = port->portdev->vdev;
-	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
+
+	/* Don't test F_SIZE at all if we're rproc: not a valid feature! */
+	if (!is_rproc_serial(vdev) &&
+	    virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE))
 		hvc_resize(port->cons.hvc, port->cons.ws);
 }
 
@@ -1260,7 +1340,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
 
 	nr_added_bufs = 0;
 	do {
-		buf = alloc_buf(PAGE_SIZE);
+		buf = alloc_buf(vq, PAGE_SIZE, 0);
 		if (!buf)
 			break;
 
@@ -1268,7 +1348,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
 		ret = add_inbuf(vq, buf);
 		if (ret < 0) {
 			spin_unlock_irq(lock);
-			free_buf(buf);
+			free_buf(buf, true);
 			break;
 		}
 		nr_added_bufs++;
@@ -1356,10 +1436,18 @@ static int add_port(struct ports_device *portdev, u32 id)
 		goto free_device;
 	}
 
-	/*
-	 * If we're not using multiport support, this has to be a console port
-	 */
-	if (!use_multiport(port->portdev)) {
+	if (is_rproc_serial(port->portdev->vdev))
+		/*
+		 * For rproc_serial assume remote processor is connected.
+		 * rproc_serial does not want the console port, only
+		 * the generic port implementation.
+		 */
+		port->host_connected = true;
+	else if (!use_multiport(port->portdev)) {
+		/*
+		 * If we're not using multiport support,
+		 * this has to be a console port.
+		 */
 		err = init_port_console(port);
 		if (err)
 			goto free_inbufs;
@@ -1392,7 +1480,7 @@ static int add_port(struct ports_device *portdev, u32 id)
 
 free_inbufs:
 	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-		free_buf(buf);
+		free_buf(buf, true);
 free_device:
 	device_destroy(pdrvdata.class, port->dev->devt);
 free_cdev:
@@ -1434,7 +1522,11 @@ static void remove_port_data(struct port *port)
 
 	/* Remove buffers we queued up for the Host to send us data in. */
 	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-		free_buf(buf);
+		free_buf(buf, true);
+
+	/* Free pending buffers from the out-queue. */
+	while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
+		free_buf(buf, true);
 }
 
 /*
@@ -1636,7 +1728,7 @@ static void control_work_handler(struct work_struct *work)
 		if (add_inbuf(portdev->c_ivq, buf) < 0) {
 			dev_warn(&portdev->vdev->dev,
 				 "Error adding buffer to queue\n");
-			free_buf(buf);
+			free_buf(buf, false);
 		}
 	}
 	spin_unlock(&portdev->cvq_lock);
@@ -1832,10 +1924,10 @@ static void remove_controlq_data(struct ports_device *portdev)
 		return;
 
 	while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
-		free_buf(buf);
+		free_buf(buf, true);
 
 	while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
-		free_buf(buf);
+		free_buf(buf, true);
 }
 
 /*
@@ -1882,11 +1974,15 @@ static int virtcons_probe(struct virtio_device *vdev)
 
 	multiport = false;
 	portdev->config.max_nr_ports = 1;
-	if (virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
-	                      offsetof(struct virtio_console_config,
-	                               max_nr_ports),
-	                      &portdev->config.max_nr_ports) == 0)
+
+	/* Don't test MULTIPORT at all if we're rproc: not a valid feature! */
+	if (!is_rproc_serial(vdev) &&
+	    virtio_config_val(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
+	                      offsetof(struct virtio_console_config,
+	                               max_nr_ports),
+	                      &portdev->config.max_nr_ports) == 0) {
 		multiport = true;
+	}
 
 	err = init_vqs(portdev);
 	if (err < 0) {
@@ -1996,6 +2092,16 @@ static unsigned int features[] = {
 	VIRTIO_CONSOLE_F_MULTIPORT,
 };
 
+static struct virtio_device_id rproc_serial_id_table[] = {
+#if IS_ENABLED(CONFIG_REMOTEPROC)
+	{ VIRTIO_ID_RPROC_SERIAL, VIRTIO_DEV_ANY_ID },
+#endif
+	{ 0 },
+};
+
+static unsigned int rproc_serial_features[] = {
+};
+
 #ifdef CONFIG_PM
 static int virtcons_freeze(struct virtio_device *vdev)
 {
@@ -2080,6 +2186,20 @@ static struct virtio_driver virtio_console = {
 #endif
 };
 
+/*
+ * virtio_rproc_serial refers to __devinit function which causes
+ * section mismatch warnings. So use __refdata to silence warnings.
+ */
+static struct virtio_driver __refdata virtio_rproc_serial = {
+	.feature_table = rproc_serial_features,
+	.feature_table_size = ARRAY_SIZE(rproc_serial_features),
+	.driver.name = "virtio_rproc_serial",
+	.driver.owner = THIS_MODULE,
+	.id_table = rproc_serial_id_table,
+	.probe = virtcons_probe,
+	.remove = virtcons_remove,
+};
+
 static int __init init(void)
 {
 	int err;
@@ -2104,7 +2224,15 @@ static int __init init(void)
 		pr_err("Error %d registering virtio driver\n", err);
 		goto free;
 	}
+	err = register_virtio_driver(&virtio_rproc_serial);
+	if (err < 0) {
+		pr_err("Error %d registering virtio rproc serial driver\n",
+		       err);
+		goto unregister;
+	}
 	return 0;
+unregister:
+	unregister_virtio_driver(&virtio_console);
 free:
 	if (pdrvdata.debugfs_dir)
 		debugfs_remove_recursive(pdrvdata.debugfs_dir);
@@ -2114,7 +2242,10 @@ free:
 
 static void __exit fini(void)
 {
+	reclaim_dma_bufs();
+
 	unregister_virtio_driver(&virtio_console);
+	unregister_virtio_driver(&virtio_rproc_serial);
 
 	class_destroy(pdrvdata.class);
 	if (pdrvdata.debugfs_dir)